author     Takashi Iwai <tiwai@suse.de>    2022-01-05 15:38:11 +0100
committer  Takashi Iwai <tiwai@suse.de>    2022-01-05 15:38:34 +0100
commit     f81483aaeb59da530b286fe5d081e1705eb5c886 (patch)
tree       a4fed750afcdf61df3a2e3c7ab4456c515651290 /include
parent     ALSA: hda/realtek: Use ALC285_FIXUP_HP_GPIO_LED on another HP laptop (diff)
parent     ALSA: hda/cs8409: Fix Jack detection after resume (diff)
Merge branch 'for-next' into for-linus
Pull 5.17 materials.

Signed-off-by: Takashi Iwai <tiwai@suse.de>
Diffstat (limited to 'include')
-rw-r--r-- include/acpi/acpi_bus.h | 2
-rw-r--r-- include/acpi/acpixf.h | 2
-rw-r--r-- include/acpi/actbl2.h | 251
-rw-r--r-- include/acpi/actbl3.h | 9
-rw-r--r-- include/acpi/actypes.h | 1
-rw-r--r-- include/acpi/apei.h | 3
-rw-r--r-- include/acpi/pcc.h | 21
-rw-r--r-- include/acpi/platform/acgcc.h | 18
-rw-r--r-- include/asm-generic/cacheflush.h | 6
-rw-r--r-- include/asm-generic/hyperv-tlfs.h | 1
-rw-r--r-- include/asm-generic/mshyperv.h | 20
-rw-r--r-- include/asm-generic/sections.h | 89
-rw-r--r-- include/asm-generic/syscall.h | 16
-rw-r--r-- include/asm-generic/vmlinux.lds.h | 33
-rw-r--r-- include/clocksource/arm_arch_timer.h | 2
-rw-r--r-- include/clocksource/timer-riscv.h | 16
-rw-r--r-- include/crypto/engine.h | 5
-rw-r--r-- include/crypto/internal/ecc.h | 281
-rw-r--r-- include/drm/amd_asic_type.h | 1
-rw-r--r-- include/drm/drm_bridge.h | 23
-rw-r--r-- include/drm/drm_connector.h | 37
-rw-r--r-- include/drm/drm_displayid.h | 101
-rw-r--r-- include/drm/drm_dp_helper.h | 26
-rw-r--r-- include/drm/drm_dp_mst_helper.h | 5
-rw-r--r-- include/drm/drm_edid.h | 47
-rw-r--r-- include/drm/drm_format_helper.h | 4
-rw-r--r-- include/drm/drm_ioctl.h | 1
-rw-r--r-- include/drm/drm_mipi_dsi.h | 4
-rw-r--r-- include/drm/drm_mode_config.h | 13
-rw-r--r-- include/drm/drm_modeset_lock.h | 8
-rw-r--r-- include/drm/drm_plane.h | 2
-rw-r--r-- include/drm/drm_print.h | 30
-rw-r--r-- include/drm/drm_probe_helper.h | 1
-rw-r--r-- include/drm/gpu_scheduler.h | 188
-rw-r--r-- include/drm/gud.h | 6
-rw-r--r-- include/drm/i915_component.h | 1
-rw-r--r-- include/drm/i915_pciids.h | 8
-rw-r--r-- include/drm/i915_pxp_tee_interface.h | 42
-rw-r--r-- include/drm/ttm/ttm_bo_api.h | 21
-rw-r--r-- include/drm/ttm/ttm_bo_driver.h | 2
-rw-r--r-- include/drm/ttm/ttm_caching.h | 17
-rw-r--r-- include/drm/ttm/ttm_device.h | 79
-rw-r--r-- include/drm/ttm/ttm_placement.h | 1
-rw-r--r-- include/drm/ttm/ttm_pool.h | 5
-rw-r--r-- include/drm/ttm/ttm_range_manager.h | 18
-rw-r--r-- include/drm/ttm/ttm_resource.h | 9
-rw-r--r-- include/drm/ttm/ttm_tt.h | 98
-rw-r--r-- include/dt-bindings/clock/am4.h | 1
-rw-r--r-- include/dt-bindings/clock/exynos850.h | 141
-rw-r--r-- include/dt-bindings/clock/imx8ulp-clock.h | 258
-rw-r--r-- include/dt-bindings/clock/ingenic,jz4725b-cgu.h (renamed from include/dt-bindings/clock/jz4725b-cgu.h) | 0
-rw-r--r-- include/dt-bindings/clock/ingenic,jz4740-cgu.h (renamed from include/dt-bindings/clock/jz4740-cgu.h) | 0
-rw-r--r-- include/dt-bindings/clock/ingenic,jz4760-cgu.h (renamed from include/dt-bindings/clock/jz4760-cgu.h) | 0
-rw-r--r-- include/dt-bindings/clock/ingenic,jz4770-cgu.h (renamed from include/dt-bindings/clock/jz4770-cgu.h) | 0
-rw-r--r-- include/dt-bindings/clock/ingenic,jz4780-cgu.h (renamed from include/dt-bindings/clock/jz4780-cgu.h) | 0
-rw-r--r-- include/dt-bindings/clock/ingenic,x1000-cgu.h (renamed from include/dt-bindings/clock/x1000-cgu.h) | 0
-rw-r--r-- include/dt-bindings/clock/ingenic,x1830-cgu.h (renamed from include/dt-bindings/clock/x1830-cgu.h) | 0
-rw-r--r-- include/dt-bindings/clock/meson8b-clkc.h | 10
-rw-r--r-- include/dt-bindings/clock/mt8195-clk.h | 864
-rw-r--r-- include/dt-bindings/clock/qcom,camcc-sc7280.h | 127
-rw-r--r-- include/dt-bindings/clock/qcom,gcc-msm8994.h | 13
-rw-r--r-- include/dt-bindings/clock/qcom,gcc-qcm2290.h | 188
-rw-r--r-- include/dt-bindings/clock/qcom,lpass-sc7280.h | 16
-rw-r--r-- include/dt-bindings/clock/qcom,rpmcc.h | 6
-rw-r--r-- include/dt-bindings/leds/common.h | 7
-rw-r--r-- include/dt-bindings/phy/phy-cadence.h | 2
-rw-r--r-- include/dt-bindings/pinctrl/mt65xx.h | 9
-rw-r--r-- include/dt-bindings/power/imx8mm-power.h | 9
-rw-r--r-- include/dt-bindings/power/qcom-aoss-qmp.h | 14
-rw-r--r-- include/dt-bindings/power/qcom-rpmpd.h | 17
-rw-r--r-- include/dt-bindings/reset/imx8ulp-pcc-reset.h | 59
-rw-r--r-- include/dt-bindings/reset/mt2712-resets.h (renamed from include/dt-bindings/reset-controller/mt2712-resets.h) | 0
-rw-r--r-- include/dt-bindings/reset/mt8173-resets.h | 2
-rw-r--r-- include/dt-bindings/reset/mt8183-resets.h (renamed from include/dt-bindings/reset-controller/mt8183-resets.h) | 3
-rw-r--r-- include/dt-bindings/reset/mt8192-resets.h (renamed from include/dt-bindings/reset-controller/mt8192-resets.h) | 0
-rw-r--r-- include/dt-bindings/reset/stericsson,db8500-prcc-reset.h | 51
-rw-r--r-- include/kunit/test.h | 13
-rw-r--r-- include/linux/acpi.h | 12
-rw-r--r-- include/linux/amba/bus.h | 18
-rw-r--r-- include/linux/anon_inodes.h | 4
-rw-r--r-- include/linux/apple-mailbox.h | 19
-rw-r--r-- include/linux/arch_topology.h | 5
-rw-r--r-- include/linux/arm_ffa.h | 2
-rw-r--r-- include/linux/ata.h | 1
-rw-r--r-- include/linux/audit.h | 37
-rw-r--r-- include/linux/audit_arch.h | 24
-rw-r--r-- include/linux/avf/virtchnl.h | 41
-rw-r--r-- include/linux/backing-dev-defs.h | 3
-rw-r--r-- include/linux/backing-dev.h | 26
-rw-r--r-- include/linux/bio.h | 147
-rw-r--r-- include/linux/bitmap.h | 2
-rw-r--r-- include/linux/blk-crypto-profile.h | 166
-rw-r--r-- include/linux/blk-integrity.h | 183
-rw-r--r-- include/linux/blk-mq.h | 584
-rw-r--r-- include/linux/blk_types.h | 55
-rw-r--r-- include/linux/blkdev.h | 949
-rw-r--r-- include/linux/blktrace_api.h | 2
-rw-r--r-- include/linux/bootconfig.h | 31
-rw-r--r-- include/linux/bottom_half.h | 1
-rw-r--r-- include/linux/bpf-cgroup.h | 21
-rw-r--r-- include/linux/bpf.h | 79
-rw-r--r-- include/linux/bpf_types.h | 9
-rw-r--r-- include/linux/bpf_verifier.h | 2
-rw-r--r-- include/linux/bpfptr.h | 1
-rw-r--r-- include/linux/brcmphy.h | 11
-rw-r--r-- include/linux/btf.h | 39
-rw-r--r-- include/linux/bvec.h | 2
-rw-r--r-- include/linux/can/bittiming.h | 89
-rw-r--r-- include/linux/can/dev.h | 34
-rw-r--r-- include/linux/cc_platform.h | 88
-rw-r--r-- include/linux/cdrom.h | 1
-rw-r--r-- include/linux/ceph/ceph_fs.h | 2
-rw-r--r-- include/linux/ceph/osd_client.h | 19
-rw-r--r-- include/linux/clk/tegra.h | 24
-rw-r--r-- include/linux/cma.h | 1
-rw-r--r-- include/linux/compiler-gcc.h | 18
-rw-r--r-- include/linux/compiler_attributes.h | 11
-rw-r--r-- include/linux/compiler_types.h | 25
-rw-r--r-- include/linux/console.h | 2
-rw-r--r-- include/linux/container_of.h | 40
-rw-r--r-- include/linux/context_tracking.h | 2
-rw-r--r-- include/linux/counter.h | 715
-rw-r--r-- include/linux/counter_enum.h | 45
-rw-r--r-- include/linux/cpufreq.h | 169
-rw-r--r-- include/linux/cpuhotplug.h | 6
-rw-r--r-- include/linux/cpuset.h | 17
-rw-r--r-- include/linux/crash_dump.h | 30
-rw-r--r-- include/linux/damon.h | 236
-rw-r--r-- include/linux/dax.h | 2
-rw-r--r-- include/linux/debug_locks.h | 2
-rw-r--r-- include/linux/decompress/mm.h | 12
-rw-r--r-- include/linux/delay.h | 2
-rw-r--r-- include/linux/device-mapper.h | 4
-rw-r--r-- include/linux/device/bus.h | 1
-rw-r--r-- include/linux/dma-buf.h | 13
-rw-r--r-- include/linux/dma-fence.h | 32
-rw-r--r-- include/linux/dma-resv.h | 224
-rw-r--r-- include/linux/dmaengine.h | 2
-rw-r--r-- include/linux/dmar.h | 8
-rw-r--r-- include/linux/dsa/8021q.h | 5
-rw-r--r-- include/linux/dsa/ocelot.h | 5
-rw-r--r-- include/linux/dsa/sja1105.h | 1
-rw-r--r-- include/linux/dtpm.h | 26
-rw-r--r-- include/linux/efi.h | 1
-rw-r--r-- include/linux/elevator.h | 181
-rw-r--r-- include/linux/elfcore.h | 2
-rw-r--r-- include/linux/energy_model.h | 68
-rw-r--r-- include/linux/etherdevice.h | 37
-rw-r--r-- include/linux/ethtool.h | 23
-rw-r--r-- include/linux/ethtool_netlink.h | 3
-rw-r--r-- include/linux/fanotify.h | 9
-rw-r--r-- include/linux/fb.h | 2
-rw-r--r-- include/linux/filter.h | 29
-rw-r--r-- include/linux/firewire.h | 11
-rw-r--r-- include/linux/firmware.h | 30
-rw-r--r-- include/linux/firmware/imx/s4.h | 20
-rw-r--r-- include/linux/firmware/xlnx-zynqmp.h | 26
-rw-r--r-- include/linux/flex_proportions.h | 9
-rw-r--r-- include/linux/fortify-string.h | 77
-rw-r--r-- include/linux/fs.h | 18
-rw-r--r-- include/linux/fscache.h | 2
-rw-r--r-- include/linux/fscrypt.h | 3
-rw-r--r-- include/linux/fsi-occ.h | 2
-rw-r--r-- include/linux/fsl/mc.h | 14
-rw-r--r-- include/linux/fsnotify.h | 58
-rw-r--r-- include/linux/fsnotify_backend.h | 96
-rw-r--r-- include/linux/ftrace.h | 38
-rw-r--r-- include/linux/fwnode.h | 1
-rw-r--r-- include/linux/generic-radix-tree.h | 3
-rw-r--r-- include/linux/genhd.h | 44
-rw-r--r-- include/linux/gfp.h | 30
-rw-r--r-- include/linux/gpio/driver.h | 19
-rw-r--r-- include/linux/highmem-internal.h | 11
-rw-r--r-- include/linux/highmem.h | 65
-rw-r--r-- include/linux/huge_mm.h | 15
-rw-r--r-- include/linux/hugetlb.h | 42
-rw-r--r-- include/linux/hyperv.h | 25
-rw-r--r-- include/linux/i2c.h | 18
-rw-r--r-- include/linux/ieee80211.h | 69
-rw-r--r-- include/linux/iio/buffer.h | 11
-rw-r--r-- include/linux/iio/buffer_impl.h | 11
-rw-r--r-- include/linux/iio/common/st_sensors.h | 13
-rw-r--r-- include/linux/iio/driver.h | 14
-rw-r--r-- include/linux/iio/iio-opaque.h | 4
-rw-r--r-- include/linux/iio/imu/adis.h | 2
-rw-r--r-- include/linux/iio/triggered_buffer.h | 11
-rw-r--r-- include/linux/inetdevice.h | 2
-rw-r--r-- include/linux/input/cy8ctmg110_pdata.h | 10
-rw-r--r-- include/linux/instruction_pointer.h | 8
-rw-r--r-- include/linux/intel-iommu.h | 13
-rw-r--r-- include/linux/io-mapping.h | 6
-rw-r--r-- include/linux/io.h | 5
-rw-r--r-- include/linux/iomap.h | 16
-rw-r--r-- include/linux/ipmi.h | 3
-rw-r--r-- include/linux/ipmi_smi.h | 59
-rw-r--r-- include/linux/ipv6.h | 2
-rw-r--r-- include/linux/irq.h | 6
-rw-r--r-- include/linux/irq_work.h | 8
-rw-r--r-- include/linux/irqchip.h | 20
-rw-r--r-- include/linux/irqdesc.h | 9
-rw-r--r-- include/linux/irqdomain.h | 4
-rw-r--r-- include/linux/kallsyms.h | 13
-rw-r--r-- include/linux/kasan.h | 17
-rw-r--r-- include/linux/kcsan-checks.h | 3
-rw-r--r-- include/linux/kernel.h | 55
-rw-r--r-- include/linux/kernel_stat.h | 1
-rw-r--r-- include/linux/kernfs.h | 28
-rw-r--r-- include/linux/keyslot-manager.h | 120
-rw-r--r-- include/linux/kfence.h | 21
-rw-r--r-- include/linux/kobject.h | 1
-rw-r--r-- include/linux/kprobes.h | 113
-rw-r--r-- include/linux/ksm.h | 4
-rw-r--r-- include/linux/kvm_host.h | 30
-rw-r--r-- include/linux/leds.h | 2
-rw-r--r-- include/linux/libata.h | 26
-rw-r--r-- include/linux/list.h | 4
-rw-r--r-- include/linux/llist.h | 4
-rw-r--r-- include/linux/lockd/xdr.h | 27
-rw-r--r-- include/linux/lockd/xdr4.h | 29
-rw-r--r-- include/linux/lockdep.h | 17
-rw-r--r-- include/linux/lockdep_types.h | 2
-rw-r--r-- include/linux/lsm_hook_defs.h | 26
-rw-r--r-- include/linux/lsm_hooks.h | 38
-rw-r--r-- include/linux/mdev.h | 20
-rw-r--r-- include/linux/mdio.h | 26
-rw-r--r-- include/linux/mem_encrypt.h | 4
-rw-r--r-- include/linux/memblock.h | 50
-rw-r--r-- include/linux/memcontrol.h | 273
-rw-r--r-- include/linux/memory.h | 23
-rw-r--r-- include/linux/memory_hotplug.h | 3
-rw-r--r-- include/linux/mempolicy.h | 5
-rw-r--r-- include/linux/mfd/da9063/core.h | 1
-rw-r--r-- include/linux/mfd/hi6421-spmi-pmic.h | 25
-rw-r--r-- include/linux/mfd/idt8a340_reg.h | 31
-rw-r--r-- include/linux/mfd/max77686-private.h | 26
-rw-r--r-- include/linux/mfd/stm32-lptimer.h | 5
-rw-r--r-- include/linux/mfd/stm32-timers.h | 4
-rw-r--r-- include/linux/mfd/ti_am335x_tscadc.h | 119
-rw-r--r-- include/linux/mfd/tps65912.h | 2
-rw-r--r-- include/linux/mfd/tps80031.h | 637
-rw-r--r-- include/linux/micrel_phy.h | 1
-rw-r--r-- include/linux/migrate.h | 28
-rw-r--r-- include/linux/migrate_mode.h | 13
-rw-r--r-- include/linux/misc_cgroup.h | 6
-rw-r--r-- include/linux/mlx4/device.h | 2
-rw-r--r-- include/linux/mlx4/driver.h | 22
-rw-r--r-- include/linux/mlx5/device.h | 63
-rw-r--r-- include/linux/mlx5/driver.h | 62
-rw-r--r-- include/linux/mlx5/eq.h | 1
-rw-r--r-- include/linux/mlx5/eswitch.h | 9
-rw-r--r-- include/linux/mlx5/fs.h | 15
-rw-r--r-- include/linux/mlx5/mlx5_ifc.h | 450
-rw-r--r-- include/linux/mm.h | 299
-rw-r--r-- include/linux/mm_inline.h | 103
-rw-r--r-- include/linux/mm_types.h | 109
-rw-r--r-- include/linux/mmc/host.h | 8
-rw-r--r-- include/linux/mmc/sdhci-pci-data.h | 18
-rw-r--r-- include/linux/mmdebug.h | 20
-rw-r--r-- include/linux/mmzone.h | 41
-rw-r--r-- include/linux/msi.h | 2
-rw-r--r-- include/linux/mtd/mtd.h | 2
-rw-r--r-- include/linux/mux/consumer.h | 23
-rw-r--r-- include/linux/mux/driver.h | 4
-rw-r--r-- include/linux/nd.h | 4
-rw-r--r-- include/linux/netdevice.h | 17
-rw-r--r-- include/linux/netfilter_arp/arp_tables.h | 5
-rw-r--r-- include/linux/netfilter_bridge/ebtables.h | 5
-rw-r--r-- include/linux/netfilter_ingress.h | 58
-rw-r--r-- include/linux/netfilter_ipv4/ip_tables.h | 6
-rw-r--r-- include/linux/netfilter_ipv6/ip6_tables.h | 5
-rw-r--r-- include/linux/netfilter_netdev.h | 146
-rw-r--r-- include/linux/netfs.h | 89
-rw-r--r-- include/linux/netlink.h | 4
-rw-r--r-- include/linux/nfs4.h | 4
-rw-r--r-- include/linux/nfs_fs.h | 77
-rw-r--r-- include/linux/nfs_xdr.h | 16
-rw-r--r-- include/linux/node.h | 4
-rw-r--r-- include/linux/nvme-fc-driver.h | 7
-rw-r--r-- include/linux/nvme-rdma.h | 2
-rw-r--r-- include/linux/nvme.h | 30
-rw-r--r-- include/linux/nvmem-provider.h | 5
-rw-r--r-- include/linux/objtool.h | 12
-rw-r--r-- include/linux/of.h | 3
-rw-r--r-- include/linux/of_fdt.h | 1
-rw-r--r-- include/linux/of_net.h | 8
-rw-r--r-- include/linux/page-flags.h | 292
-rw-r--r-- include/linux/page_idle.h | 99
-rw-r--r-- include/linux/page_owner.h | 20
-rw-r--r-- include/linux/page_ref.h | 158
-rw-r--r-- include/linux/pagemap.h | 714
-rw-r--r-- include/linux/part_stat.h | 1
-rw-r--r-- include/linux/pci-acpi.h | 8
-rw-r--r-- include/linux/pci.h | 24
-rw-r--r-- include/linux/percpu-refcount.h | 33
-rw-r--r-- include/linux/percpu.h | 6
-rw-r--r-- include/linux/perf_event.h | 24
-rw-r--r-- include/linux/phy.h | 35
-rw-r--r-- include/linux/phylink.h | 14
-rw-r--r-- include/linux/pid.h | 1
-rw-r--r-- include/linux/platform_data/brcmfmac.h | 2
-rw-r--r-- include/linux/platform_data/cros_ec_proto.h | 7
-rw-r--r-- include/linux/platform_data/mlxreg.h | 82
-rw-r--r-- include/linux/platform_data/ti-sysc.h | 3
-rw-r--r-- include/linux/platform_data/ux500_wdt.h | 18
-rw-r--r-- include/linux/platform_data/x86/soc.h | 65
-rw-r--r-- include/linux/plist.h | 5
-rw-r--r-- include/linux/pm_opp.h | 20
-rw-r--r-- include/linux/pm_wakeirq.h | 9
-rw-r--r-- include/linux/pnfs_osd_xdr.h | 317
-rw-r--r-- include/linux/posix-timers.h | 2
-rw-r--r-- include/linux/power/max17042_battery.h | 4
-rw-r--r-- include/linux/preempt.h | 26
-rw-r--r-- include/linux/property.h | 5
-rw-r--r-- include/linux/ptrace.h | 22
-rw-r--r-- include/linux/pwm.h | 13
-rw-r--r-- include/linux/qed/common_hsi.h | 141
-rw-r--r-- include/linux/qed/eth_common.h | 1
-rw-r--r-- include/linux/qed/fcoe_common.h | 362
-rw-r--r-- include/linux/qed/iscsi_common.h | 360
-rw-r--r-- include/linux/qed/nvmetcp_common.h | 18
-rw-r--r-- include/linux/qed/qed_chain.h | 97
-rw-r--r-- include/linux/qed/qed_eth_if.h | 23
-rw-r--r-- include/linux/qed/qed_if.h | 265
-rw-r--r-- include/linux/qed/qed_iscsi_if.h | 2
-rw-r--r-- include/linux/qed/qed_ll2_if.h | 42
-rw-r--r-- include/linux/qed/qed_nvmetcp_if.h | 17
-rw-r--r-- include/linux/qed/qed_rdma_if.h | 3
-rw-r--r-- include/linux/qed/rdma_common.h | 1
-rw-r--r-- include/linux/radix-tree.h | 4
-rw-r--r-- include/linux/rcupdate.h | 3
-rw-r--r-- include/linux/rcupdate_trace.h | 5
-rw-r--r-- include/linux/regulator/lp872x.h | 17
-rw-r--r-- include/linux/regulator/tps62360.h | 6
-rw-r--r-- include/linux/remoteproc.h | 12
-rw-r--r-- include/linux/rmap.h | 10
-rw-r--r-- include/linux/rpmsg.h | 12
-rw-r--r-- include/linux/rtc.h | 3
-rw-r--r-- include/linux/rwlock.h | 15
-rw-r--r-- include/linux/rwlock_api_smp.h | 6
-rw-r--r-- include/linux/rwsem.h | 1
-rw-r--r-- include/linux/sbitmap.h | 35
-rw-r--r-- include/linux/sched.h | 66
-rw-r--r-- include/linux/sched/idle.h | 4
-rw-r--r-- include/linux/sched/mm.h | 29
-rw-r--r-- include/linux/sched/signal.h | 14
-rw-r--r-- include/linux/sched/task.h | 3
-rw-r--r-- include/linux/sched/task_stack.h | 4
-rw-r--r-- include/linux/sched/topology.h | 9
-rw-r--r-- include/linux/secretmem.h | 2
-rw-r--r-- include/linux/security.h | 65
-rw-r--r-- include/linux/seq_file.h | 19
-rw-r--r-- include/linux/seqno-fence.h | 109
-rw-r--r-- include/linux/shrinker.h | 1
-rw-r--r-- include/linux/signal.h | 7
-rw-r--r-- include/linux/signal_types.h | 3
-rw-r--r-- include/linux/skbuff.h | 42
-rw-r--r-- include/linux/skmsg.h | 31
-rw-r--r-- include/linux/slab.h | 135
-rw-r--r-- include/linux/slub_def.h | 13
-rw-r--r-- include/linux/smp.h | 1
-rw-r--r-- include/linux/soc/marvell/octeontx2/asm.h | 15
-rw-r--r-- include/linux/soc/mediatek/mtk-mmsys.h | 3
-rw-r--r-- include/linux/soc/qcom/qcom_aoss.h | 38
-rw-r--r-- include/linux/soc/qcom/smd-rpm.h | 2
-rw-r--r-- include/linux/soc/samsung/exynos-chipid.h | 6
-rw-r--r-- include/linux/socket.h | 2
-rw-r--r-- include/linux/spi/ads7846.h | 15
-rw-r--r-- include/linux/spi/max7301.h | 2
-rw-r--r-- include/linux/spi/spi.h | 55
-rw-r--r-- include/linux/spinlock.h | 14
-rw-r--r-- include/linux/spinlock_api_smp.h | 9
-rw-r--r-- include/linux/spinlock_up.h | 1
-rw-r--r-- include/linux/stackdepot.h | 11
-rw-r--r-- include/linux/stacktrace.h | 1
-rw-r--r-- include/linux/stddef.h | 65
-rw-r--r-- include/linux/string.h | 59
-rw-r--r-- include/linux/string_helpers.h | 1
-rw-r--r-- include/linux/sunrpc/clnt.h | 1
-rw-r--r-- include/linux/sunrpc/sched.h | 16
-rw-r--r-- include/linux/sunrpc/svc.h | 14
-rw-r--r-- include/linux/surface_aggregator/controller.h | 4
-rw-r--r-- include/linux/swap.h | 18
-rw-r--r-- include/linux/swiotlb.h | 3
-rw-r--r-- include/linux/switchtec.h | 1
-rw-r--r-- include/linux/syscalls.h | 7
-rw-r--r-- include/linux/t10-pi.h | 2
-rw-r--r-- include/linux/tee_drv.h | 7
-rw-r--r-- include/linux/thread_info.h | 2
-rw-r--r-- include/linux/topology.h | 13
-rw-r--r-- include/linux/torture.h | 8
-rw-r--r-- include/linux/tpm.h | 1
-rw-r--r-- include/linux/trace_events.h | 2
-rw-r--r-- include/linux/trace_recursion.h | 76
-rw-r--r-- include/linux/tty.h | 140
-rw-r--r-- include/linux/tty_driver.h | 10
-rw-r--r-- include/linux/tty_flip.h | 20
-rw-r--r-- include/linux/tty_ldisc.h | 27
-rw-r--r-- include/linux/u64_stats_sync.h | 10
-rw-r--r-- include/linux/uio.h | 4
-rw-r--r-- include/linux/usb/hcd.h | 2
-rw-r--r-- include/linux/usb/tegra_usb_phy.h | 5
-rw-r--r-- include/linux/user_namespace.h | 2
-rw-r--r-- include/linux/vdpa.h | 53
-rw-r--r-- include/linux/vermagic.h | 2
-rw-r--r-- include/linux/vfio.h | 53
-rw-r--r-- include/linux/virtio.h | 2
-rw-r--r-- include/linux/virtio_config.h | 6
-rw-r--r-- include/linux/virtio_pci_legacy.h | 42
-rw-r--r-- include/linux/vmalloc.h | 24
-rw-r--r-- include/linux/vmstat.h | 113
-rw-r--r-- include/linux/wait.h | 3
-rw-r--r-- include/linux/workqueue.h | 3
-rw-r--r-- include/linux/writeback.h | 23
-rw-r--r-- include/linux/ww_mutex.h | 15
-rw-r--r-- include/linux/xz.h | 106
-rw-r--r-- include/linux/zstd.h | 1252
-rw-r--r-- include/linux/zstd_errors.h | 77
-rw-r--r-- include/linux/zstd_lib.h | 2432
-rw-r--r-- include/media/hevc-ctrls.h | 11
-rw-r--r-- include/media/i2c/mt9p031.h | 1
-rw-r--r-- include/media/media-entity.h | 3
-rw-r--r-- include/media/tuner.h | 1
-rw-r--r-- include/media/v4l2-async.h | 105
-rw-r--r-- include/media/v4l2-dev.h | 3
-rw-r--r-- include/media/v4l2-fwnode.h | 12
-rw-r--r-- include/media/videobuf2-core.h | 59
-rw-r--r-- include/memory/renesas-rpc-if.h | 1
-rw-r--r-- include/net/9p/9p.h | 12
-rw-r--r-- include/net/9p/client.h | 24
-rw-r--r-- include/net/9p/transport.h | 26
-rw-r--r-- include/net/act_api.h | 10
-rw-r--r-- include/net/amt.h | 385
-rw-r--r-- include/net/ax25.h | 13
-rw-r--r-- include/net/bluetooth/bluetooth.h | 90
-rw-r--r-- include/net/bluetooth/hci.h | 117
-rw-r--r-- include/net/bluetooth/hci_core.h | 75
-rw-r--r-- include/net/busy_poll.h | 3
-rw-r--r-- include/net/cfg80211.h | 81
-rw-r--r-- include/net/codel.h | 5
-rw-r--r-- include/net/codel_impl.h | 18
-rw-r--r-- include/net/datalink.h | 2
-rw-r--r-- include/net/devlink.h | 128
-rw-r--r-- include/net/dn.h | 2
-rw-r--r-- include/net/dsa.h | 46
-rw-r--r-- include/net/flow_dissector.h | 1
-rw-r--r-- include/net/gen_stats.h | 59
-rw-r--r-- include/net/inet_connection_sock.h | 2
-rw-r--r-- include/net/inet_ecn.h | 17
-rw-r--r-- include/net/inet_sock.h | 3
-rw-r--r-- include/net/ioam6.h | 3
-rw-r--r-- include/net/ip.h | 8
-rw-r--r-- include/net/ip_vs.h | 11
-rw-r--r-- include/net/ipv6.h | 1
-rw-r--r-- include/net/llc.h | 6
-rw-r--r-- include/net/llc_if.h | 3
-rw-r--r-- include/net/mac80211.h | 11
-rw-r--r-- include/net/mctp.h | 84
-rw-r--r-- include/net/mctpdevice.h | 21
-rw-r--r-- include/net/mptcp.h | 8
-rw-r--r-- include/net/ndisc.h | 2
-rw-r--r-- include/net/neighbour.h | 45
-rw-r--r-- include/net/netfilter/nf_tables.h | 10
-rw-r--r-- include/net/netfilter/nf_tables_ipv4.h | 7
-rw-r--r-- include/net/netfilter/nf_tables_ipv6.h | 6
-rw-r--r-- include/net/netfilter/xt_rateest.h | 2
-rw-r--r-- include/net/page_pool.h | 12
-rw-r--r-- include/net/pkt_cls.h | 6
-rw-r--r-- include/net/rose.h | 8
-rw-r--r-- include/net/sch_generic.h | 86
-rw-r--r-- include/net/sctp/sctp.h | 7
-rw-r--r-- include/net/sctp/sm.h | 6
-rw-r--r-- include/net/sctp/structs.h | 20
-rw-r--r-- include/net/sock.h | 137
-rw-r--r-- include/net/strparser.h | 20
-rw-r--r-- include/net/switchdev.h | 48
-rw-r--r-- include/net/tcp.h | 63
-rw-r--r-- include/net/tls.h | 16
-rw-r--r-- include/net/udp.h | 5
-rw-r--r-- include/net/xdp.h | 8
-rw-r--r-- include/net/xdp_sock_drv.h | 22
-rw-r--r-- include/net/xsk_buff_pool.h | 48
-rw-r--r-- include/rdma/ib_hdrs.h | 1
-rw-r--r-- include/rdma/ib_umem.h | 11
-rw-r--r-- include/rdma/ib_verbs.h | 74
-rw-r--r-- include/rdma/rdma_counter.h | 2
-rw-r--r-- include/scsi/libsas.h | 1
-rw-r--r-- include/scsi/sas.h | 12
-rw-r--r-- include/scsi/scsi_cmnd.h | 19
-rw-r--r-- include/scsi/scsi_device.h | 3
-rw-r--r-- include/scsi/scsi_host.h | 28
-rw-r--r-- include/scsi/scsi_transport_sas.h | 1
-rw-r--r-- include/soc/arc/timers.h | 4
-rw-r--r-- include/soc/fsl/dpaa2-io.h | 9
-rw-r--r-- include/soc/mscc/ocelot.h | 27
-rw-r--r-- include/soc/mscc/ocelot_vcap.h | 10
-rw-r--r-- include/soc/qcom/spm.h | 43
-rw-r--r-- include/soc/tegra/fuse.h | 31
-rw-r--r-- include/soc/tegra/irq.h | 9
-rw-r--r-- include/soc/tegra/pm.h | 2
-rw-r--r-- include/sound/hda_codec.h | 8
-rw-r--r-- include/sound/hdaudio.h | 1
-rw-r--r-- include/sound/hdaudio_ext.h | 27
-rw-r--r-- include/sound/intel-nhlt.h | 37
-rw-r--r-- include/sound/memalloc.h | 14
-rw-r--r-- include/sound/pcm.h | 3
-rw-r--r-- include/target/target_core_base.h | 9
-rw-r--r-- include/target/target_core_fabric.h | 1
-rw-r--r-- include/trace/bpf_probe.h | 19
-rw-r--r-- include/trace/events/afs.h | 21
-rw-r--r-- include/trace/events/block.h | 6
-rw-r--r-- include/trace/events/devlink.h | 72
-rw-r--r-- include/trace/events/erofs.h | 2
-rw-r--r-- include/trace/events/f2fs.h | 33
-rw-r--r-- include/trace/events/fs.h | 122
-rw-r--r-- include/trace/events/io_uring.h | 61
-rw-r--r-- include/trace/events/mctp.h | 75
-rw-r--r-- include/trace/events/mmap_lock.h | 48
-rw-r--r-- include/trace/events/nfs.h | 375
-rw-r--r-- include/trace/events/pagemap.h | 46
-rw-r--r-- include/trace/events/rpcgss.h | 18
-rw-r--r-- include/trace/events/rpcrdma.h | 240
-rw-r--r-- include/trace/events/sunrpc.h | 135
-rw-r--r-- include/trace/events/sunrpc_base.h | 18
-rw-r--r-- include/trace/events/vmscan.h | 38
-rw-r--r-- include/trace/events/writeback.h | 35
-rw-r--r-- include/uapi/asm-generic/fcntl.h | 4
-rw-r--r-- include/uapi/asm-generic/signal-defs.h | 1
-rw-r--r-- include/uapi/asm-generic/socket.h | 2
-rw-r--r-- include/uapi/asm-generic/unistd.h | 5
-rw-r--r-- include/uapi/drm/amdgpu_drm.h | 13
-rw-r--r-- include/uapi/drm/drm_fourcc.h | 12
-rw-r--r-- include/uapi/drm/drm_mode.h | 4
-rw-r--r-- include/uapi/drm/i915_drm.h | 242
-rw-r--r-- include/uapi/drm/mga_drm.h | 22
-rw-r--r-- include/uapi/drm/v3d_drm.h | 78
-rw-r--r-- include/uapi/drm/virtgpu_drm.h | 27
-rw-r--r-- include/uapi/linux/acrn.h | 70
-rw-r--r-- include/uapi/linux/amt.h | 62
-rw-r--r-- include/uapi/linux/audit.h | 7
-rw-r--r-- include/uapi/linux/bcache.h | 445
-rw-r--r-- include/uapi/linux/bpf.h | 76
-rw-r--r-- include/uapi/linux/btf.h | 55
-rw-r--r-- include/uapi/linux/btrfs.h | 11
-rw-r--r-- include/uapi/linux/can/netlink.h | 31
-rw-r--r-- include/uapi/linux/cdrom.h | 19
-rw-r--r-- include/uapi/linux/counter.h | 154
-rw-r--r-- include/uapi/linux/devlink.h | 2
-rw-r--r-- include/uapi/linux/dlm_device.h | 4
-rw-r--r-- include/uapi/linux/ethtool.h | 29
-rw-r--r-- include/uapi/linux/ethtool_netlink.h | 21
-rw-r--r-- include/uapi/linux/fanotify.h | 8
-rw-r--r-- include/uapi/linux/fuse.h | 7
-rw-r--r-- include/uapi/linux/futex.h | 25
-rw-r--r-- include/uapi/linux/if_ether.h | 1
-rw-r--r-- include/uapi/linux/io_uring.h | 1
-rw-r--r-- include/uapi/linux/ioam6_iptunnel.h | 29
-rw-r--r-- include/uapi/linux/ip.h | 1
-rw-r--r-- include/uapi/linux/ipmi.h | 16
-rw-r--r-- include/uapi/linux/ipv6.h | 1
-rw-r--r-- include/uapi/linux/kvm.h | 30
-rw-r--r-- include/uapi/linux/map_to_14segment.h | 241
-rw-r--r-- include/uapi/linux/mctp.h | 18
-rw-r--r-- include/uapi/linux/mdio.h | 9
-rw-r--r-- include/uapi/linux/mptcp.h | 35
-rw-r--r-- include/uapi/linux/neighbour.h | 35
-rw-r--r-- include/uapi/linux/netfilter.h | 1
-rw-r--r-- include/uapi/linux/netfilter/nf_tables.h | 6
-rw-r--r-- include/uapi/linux/nfsd/nfsfh.h | 115
-rw-r--r-- include/uapi/linux/nitro_enclaves.h | 10
-rw-r--r-- include/uapi/linux/nl80211-vnd-intel.h | 29
-rw-r--r-- include/uapi/linux/nl80211.h | 115
-rw-r--r-- include/uapi/linux/pci_regs.h | 6
-rw-r--r-- include/uapi/linux/perf_event.h | 34
-rw-r--r-- include/uapi/linux/pkt_sched.h | 2
-rw-r--r-- include/uapi/linux/prctl.h | 5
-rw-r--r-- include/uapi/linux/rtc.h | 31
-rw-r--r-- include/uapi/linux/smc.h | 44
-rw-r--r-- include/uapi/linux/stddef.h | 37
-rw-r--r-- include/uapi/linux/sysctl.h | 1
-rw-r--r-- include/uapi/linux/tls.h | 30
-rw-r--r-- include/uapi/linux/v4l2-controls.h | 6
-rw-r--r-- include/uapi/linux/vdpa.h | 7
-rw-r--r-- include/uapi/linux/videodev2.h | 31
-rw-r--r-- include/uapi/linux/virtio_gpio.h | 27
-rw-r--r-- include/uapi/linux/virtio_gpu.h | 18
-rw-r--r-- include/uapi/linux/virtio_i2c.h | 6
-rw-r--r-- include/uapi/linux/virtio_mem.h | 9
-rw-r--r-- include/uapi/linux/vm_sockets.h | 13
-rw-r--r-- include/uapi/misc/habanalabs.h | 84
-rw-r--r-- include/uapi/rdma/efa-abi.h | 18
-rw-r--r-- include/uapi/rdma/rdma_netlink.h | 5
-rw-r--r-- include/uapi/rdma/rdma_user_rxe.h | 14
-rw-r--r-- include/uapi/sound/asoc.h | 4
-rw-r--r-- include/uapi/sound/asound.h | 5
-rw-r--r-- include/xen/arm/hypercall.h | 15
-rw-r--r-- include/xen/balloon.h | 3
-rw-r--r-- include/xen/interface/callback.h | 19
-rw-r--r-- include/xen/interface/elfnote.h | 19
-rw-r--r-- include/xen/interface/event_channel.h | 2
-rw-r--r-- include/xen/interface/features.h | 2
-rw-r--r-- include/xen/interface/grant_table.h | 19
-rw-r--r-- include/xen/interface/hvm/dm_op.h | 19
-rw-r--r-- include/xen/interface/hvm/hvm_op.h | 20
-rw-r--r-- include/xen/interface/hvm/hvm_vcpu.h | 19
-rw-r--r-- include/xen/interface/hvm/params.h | 20
-rw-r--r-- include/xen/interface/hvm/start_info.h | 19
-rw-r--r-- include/xen/interface/io/9pfs.h | 19
-rw-r--r-- include/xen/interface/io/blkif.h | 2
-rw-r--r-- include/xen/interface/io/console.h | 2
-rw-r--r-- include/xen/interface/io/displif.h | 19
-rw-r--r-- include/xen/interface/io/fbif.h | 19
-rw-r--r-- include/xen/interface/io/kbdif.h | 19
-rw-r--r-- include/xen/interface/io/netif.h | 19
-rw-r--r-- include/xen/interface/io/pciif.h | 19
-rw-r--r-- include/xen/interface/io/protocols.h | 2
-rw-r--r-- include/xen/interface/io/pvcalls.h | 2
-rw-r--r-- include/xen/interface/io/ring.h | 19
-rw-r--r-- include/xen/interface/io/sndif.h | 19
-rw-r--r-- include/xen/interface/io/vscsiif.h | 19
-rw-r--r-- include/xen/interface/io/xenbus.h | 2
-rw-r--r-- include/xen/interface/io/xs_wire.h | 2
-rw-r--r-- include/xen/interface/memory.h | 2
-rw-r--r-- include/xen/interface/nmi.h | 2
-rw-r--r-- include/xen/interface/physdev.h | 20
-rw-r--r-- include/xen/interface/platform.h | 19
-rw-r--r-- include/xen/interface/sched.h | 19
-rw-r--r-- include/xen/interface/vcpu.h | 19
-rw-r--r-- include/xen/interface/version.h | 2
-rw-r--r-- include/xen/interface/xen-mca.h | 1
-rw-r--r-- include/xen/interface/xen.h | 19
-rw-r--r-- include/xen/interface/xenpmu.h | 2
-rw-r--r-- include/xen/pci.h | 28
-rw-r--r-- include/xen/xen.h | 6
633 files changed, 19357 insertions, 9215 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 13d93371790e..480f9207a4c6 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -278,6 +278,7 @@ struct acpi_device_power {
int state; /* Current state */
struct acpi_device_power_flags flags;
struct acpi_device_power_state states[ACPI_D_STATE_COUNT]; /* Power states (D0-D3Cold) */
+ u8 state_for_enumeration; /* Deepest power state for enumeration */
};
struct acpi_dep_data {
@@ -570,7 +571,6 @@ struct acpi_bus_type {
bool (*match)(struct device *dev);
struct acpi_device * (*find_companion)(struct device *);
void (*setup)(struct device *);
- void (*cleanup)(struct device *);
};
int register_acpi_bus_type(struct acpi_bus_type *);
int unregister_acpi_bus_type(struct acpi_bus_type *);
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index fa02e3ff0faf..73ba13914321 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20210730
+#define ACPI_CA_VERSION 0x20210930
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index a47b32a5cbde..71ca090fd61b 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -35,6 +35,7 @@
#define ACPI_SIG_MSCT "MSCT" /* Maximum System Characteristics Table */
#define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */
#define ACPI_SIG_NFIT "NFIT" /* NVDIMM Firmware Interface Table */
+#define ACPI_SIG_NHLT "NHLT" /* Non HD Audio Link Table */
#define ACPI_SIG_PCCT "PCCT" /* Platform Communications Channel Table */
#define ACPI_SIG_PDTT "PDTT" /* Platform Debug Trigger Table */
#define ACPI_SIG_PHAT "PHAT" /* Platform Health Assessment Table */
@@ -46,7 +47,6 @@
#define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */
#define ACPI_SIG_SDEI "SDEI" /* Software Delegated Exception Interface Table */
#define ACPI_SIG_SDEV "SDEV" /* Secure Devices table */
-#define ACPI_SIG_NHLT "NHLT" /* Non-HDAudio Link Table */
#define ACPI_SIG_SVKL "SVKL" /* Storage Volume Key Location Table */
/*
@@ -978,6 +978,7 @@ struct acpi_madt_multiproc_wakeup_mailbox {
/* MADT Local APIC flags */
#define ACPI_MADT_ENABLED (1) /* 00: Processor is usable if set */
+#define ACPI_MADT_ONLINE_CAPABLE (2) /* 01: System HW supports enabling processor at runtime */
/* MADT MPS INTI flags (inti_flags) */
@@ -1411,6 +1412,254 @@ struct nfit_device_handle {
/*******************************************************************************
*
+ * NHLT - Non HD Audio Link Table
+ *
+ * Conforms to: Intel Smart Sound Technology NHLT Specification
+ * Version 0.8.1, January 2020.
+ *
+ ******************************************************************************/
+
+/* Main table */
+
+struct acpi_table_nhlt {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 endpoint_count;
+};
+
+struct acpi_nhlt_endpoint {
+ u32 descriptor_length;
+ u8 link_type;
+ u8 instance_id;
+ u16 vendor_id;
+ u16 device_id;
+ u16 revision_id;
+ u32 subsystem_id;
+ u8 device_type;
+ u8 direction;
+ u8 virtual_bus_id;
+};
+
+/* Types for link_type field above */
+
+#define ACPI_NHLT_RESERVED_HD_AUDIO 0
+#define ACPI_NHLT_RESERVED_DSP 1
+#define ACPI_NHLT_PDM 2
+#define ACPI_NHLT_SSP 3
+#define ACPI_NHLT_RESERVED_SLIMBUS 4
+#define ACPI_NHLT_RESERVED_SOUNDWIRE 5
+#define ACPI_NHLT_TYPE_RESERVED 6 /* 6 and above are reserved */
+
+/* All other values above are reserved */
+
+/* Values for device_id field above */
+
+#define ACPI_NHLT_PDM_DMIC 0xAE20
+#define ACPI_NHLT_BT_SIDEBAND 0xAE30
+#define ACPI_NHLT_I2S_TDM_CODECS 0xAE23
+
+/* Values for device_type field above */
+
+/* SSP Link */
+
+#define ACPI_NHLT_LINK_BT_SIDEBAND 0
+#define ACPI_NHLT_LINK_FM 1
+#define ACPI_NHLT_LINK_MODEM 2
+/* 3 is reserved */
+#define ACPI_NHLT_LINK_SSP_ANALOG_CODEC 4
+
+/* PDM Link */
+
+#define ACPI_NHLT_PDM_ON_CAVS_1P8 0
+#define ACPI_NHLT_PDM_ON_CAVS_1P5 1
+
+/* Values for Direction field above */
+
+#define ACPI_NHLT_DIR_RENDER 0
+#define ACPI_NHLT_DIR_CAPTURE 1
+#define ACPI_NHLT_DIR_RENDER_LOOPBACK 2
+#define ACPI_NHLT_DIR_RENDER_FEEDBACK 3
+#define ACPI_NHLT_DIR_RESERVED 4 /* 4 and above are reserved */
+
+struct acpi_nhlt_device_specific_config {
+ u32 capabilities_size;
+ u8 virtual_slot;
+ u8 config_type;
+};
+
+struct acpi_nhlt_device_specific_config_a {
+ u32 capabilities_size;
+ u8 virtual_slot;
+ u8 config_type;
+ u8 array_type;
+};
+
+/* Values for Config Type above */
+
+#define ACPI_NHLT_TYPE_MIC_ARRAY 0x01
+#define ACPI_NHLT_TYPE_GENERIC 0x00
+
+/* Mask for Extension field of array_type */
+
+#define ACPI_NHLT_ARRAY_TYPE_MASK 0x10
+
+struct acpi_nhlt_device_specific_config_b {
+ u32 capabilities_size;
+};
+
+struct acpi_nhlt_device_specific_config_c {
+ u32 capabilities_size;
+ u8 virtual_slot;
+};
+
+struct acpi_nhlt_wave_extensible {
+ u16 format_tag;
+ u16 channel_count;
+ u32 samples_per_sec;
+ u32 avg_bytes_per_sec;
+ u16 block_align;
+ u16 bits_per_sample;
+ u16 extra_format_size;
+ u16 valid_bits_per_sample;
+ u32 channel_mask;
+ u8 sub_format_guid[16];
+};
+
+/* Values for channel_mask above */
+
+#define ACPI_NHLT_SPKR_FRONT_LEFT 0x1
+#define ACPI_NHLT_SPKR_FRONT_RIGHT 0x2
+#define ACPI_NHLT_SPKR_FRONT_CENTER 0x4
+#define ACPI_NHLT_SPKR_LOW_FREQ 0x8
+#define ACPI_NHLT_SPKR_BACK_LEFT 0x10
+#define ACPI_NHLT_SPKR_BACK_RIGHT 0x20
+#define ACPI_NHLT_SPKR_FRONT_LEFT_OF_CENTER 0x40
+#define ACPI_NHLT_SPKR_FRONT_RIGHT_OF_CENTER 0x80
+#define ACPI_NHLT_SPKR_BACK_CENTER 0x100
+#define ACPI_NHLT_SPKR_SIDE_LEFT 0x200
+#define ACPI_NHLT_SPKR_SIDE_RIGHT 0x400
+#define ACPI_NHLT_SPKR_TOP_CENTER 0x800
+#define ACPI_NHLT_SPKR_TOP_FRONT_LEFT 0x1000
+#define ACPI_NHLT_SPKR_TOP_FRONT_CENTER 0x2000
+#define ACPI_NHLT_SPKR_TOP_FRONT_RIGHT 0x4000
+#define ACPI_NHLT_SPKR_TOP_BACK_LEFT 0x8000
+#define ACPI_NHLT_SPKR_TOP_BACK_CENTER 0x10000
+#define ACPI_NHLT_SPKR_TOP_BACK_RIGHT 0x20000
+
+struct acpi_nhlt_format_config {
+ struct acpi_nhlt_wave_extensible format;
+ u32 capability_size;
+ u8 capabilities[];
+};
+
+struct acpi_nhlt_formats_config {
+ u8 formats_count;
+};
+
+struct acpi_nhlt_device_specific_hdr {
+ u8 virtual_slot;
+ u8 config_type;
+};
+
+/* Types for config_type above */
+
+#define ACPI_NHLT_GENERIC 0
+#define ACPI_NHLT_MIC 1
+#define ACPI_NHLT_RENDER 3
+
+struct acpi_nhlt_mic_device_specific_config {
+ struct acpi_nhlt_device_specific_hdr device_config;
+ u8 array_type_ext;
+};
+
+/* Values for array_type_ext above */
+
+#define SMALL_LINEAR_2ELEMENT 0x0A
+#define BIG_LINEAR_2ELEMENT 0x0B
+#define FIRST_GEOMETRY_LINEAR_4ELEMENT 0x0C
+#define PLANAR_LSHAPED_4ELEMENT 0x0D
+#define SECOND_GEOMETRY_LINEAR_4ELEMENT 0x0E
+#define VENDOR_DEFINED 0x0F
+#define ARRAY_TYPE_MASK 0x0F
+#define ARRAY_TYPE_EXT_MASK 0x10
+
+#define NO_EXTENSION 0x0
+#define MIC_SNR_SENSITIVITY_EXT 0x1
+
+struct acpi_nhlt_vendor_mic_config {
+ u8 type;
+ u8 panel;
+ u16 speaker_position_distance; // mm
+ u16 horizontal_offset; // mm
+ u16 vertical_offset; // mm
+ u8 frequency_low_band; // 5*hz
+ u8 frequency_high_band; // 500*hz
+ u16 direction_angle; // -180 - + 180
+ u16 elevation_angle; // -180 - + 180
+ u16 work_vertical_angle_begin; // -180 - + 180 with 2 deg step
+ u16 work_vertical_angle_end; // -180 - + 180 with 2 deg step
+ u16 work_horizontal_angle_begin; // -180 - + 180 with 2 deg step
+ u16 work_horizontal_angle_end; // -180 - + 180 with 2 deg step
+};
+
+/* Values for Type field above */
+
+#define MIC_OMNIDIRECTIONAL 0
+#define MIC_SUBCARDIOID 1
+#define MIC_CARDIOID 2
+#define MIC_SUPER_CARDIOID 3
+#define MIC_HYPER_CARDIOID 4
+#define MIC_8_SHAPED 5
+#define MIC_VENDOR_DEFINED 7
+
+/* Values for Panel field above */
+
+#define MIC_TOP 0
+#define MIC_BOTTOM 1
+#define MIC_LEFT 2
+#define MIC_RIGHT 3
+#define MIC_FRONT 4
+#define MIC_REAR 5
+
+struct acpi_nhlt_vendor_mic_device_specific_config {
+ struct acpi_nhlt_mic_device_specific_config mic_array_device_config;
+ u8 number_of_microphones;
+ struct acpi_nhlt_vendor_mic_config mic_config[]; // indexed by number_of_microphones
+};
+
+/* Microphone SNR and Sensitivity extension */
+
+struct acpi_nhlt_mic_snr_sensitivity_extension {
+ u32 SNR;
+ u32 sensitivity;
+};
+
+struct acpi_nhlt_render_feedback_device_specific_config {
+ struct acpi_nhlt_device_specific_config device_config;
+ u8 feedback_virtual_slot; // render slot in case of capture
+ u16 feedback_channels; // informative only
+ u16 feedback_valid_bits_per_sample;
+};
+
+/* Linux-specific structures */
+
+struct acpi_nhlt_linux_specific_count {
+ u8 structure_count;
+};
+
+struct acpi_nhlt_linux_specific_data {
+ u8 device_id[16];
+ u8 device_instance_id;
+ u8 device_port_id;
+ u8 filler[18];
+};
+
+struct acpi_nhlt_table_terminator {
+ u32 terminator_value;
+ u32 terminator_signature;
+};
+
+/*******************************************************************************
+ *
* PCCT - Platform Communications Channel Table (ACPI 5.0)
* Version 2 (ACPI 6.2)
*
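Since the NHLT body is a packed run of variable-length endpoint descriptors rather than a fixed array, consumers advance through it using each endpoint's descriptor_length field. A minimal, hedged sketch of that traversal (this is not the kernel's actual NHLT parser; the helper name and the pr_info reporting are illustrative only):

static void nhlt_walk_endpoints(struct acpi_table_nhlt *table)
{
	u8 *pos = (u8 *)table + sizeof(*table);
	int i;

	for (i = 0; i < table->endpoint_count; i++) {
		struct acpi_nhlt_endpoint *ep = (struct acpi_nhlt_endpoint *)pos;

		/* link_type selects PDM, SSP, etc.; direction is render vs. capture */
		pr_info("NHLT endpoint %d: link_type=%u direction=%u vbus=%u\n",
			i, ep->link_type, ep->direction, ep->virtual_bus_id);
		/* descriptor_length covers the whole variable-length descriptor */
		pos += ep->descriptor_length;
	}
}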
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 9125e2f16329..edbf1ad8206d 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -191,7 +191,8 @@ enum acpi_srat_type {
ACPI_SRAT_TYPE_GICC_AFFINITY = 3,
ACPI_SRAT_TYPE_GIC_ITS_AFFINITY = 4, /* ACPI 6.2 */
ACPI_SRAT_TYPE_GENERIC_AFFINITY = 5, /* ACPI 6.3 */
- ACPI_SRAT_TYPE_RESERVED = 6 /* 5 and greater are reserved */
+ ACPI_SRAT_TYPE_GENERIC_PORT_AFFINITY = 6, /* ACPI 6.4 */
+ ACPI_SRAT_TYPE_RESERVED = 7 /* 7 and greater are reserved */
};
/*
@@ -272,7 +273,11 @@ struct acpi_srat_gic_its_affinity {
u32 its_id;
};
-/* 5: Generic Initiator Affinity Structure (ACPI 6.3) */
+/*
+ * Common structure for SRAT subtable types:
+ * 5: ACPI_SRAT_TYPE_GENERIC_AFFINITY
+ * 6: ACPI_SRAT_TYPE_GENERIC_PORT_AFFINITY
+ */
struct acpi_srat_generic_affinity {
struct acpi_subtable_header header;
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 92c71dfce0d5..ff8b3c913f21 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -1281,6 +1281,7 @@ typedef enum {
#define ACPI_OSI_WIN_10_RS4 0x12
#define ACPI_OSI_WIN_10_RS5 0x13
#define ACPI_OSI_WIN_10_19H1 0x14
+#define ACPI_OSI_WIN_10_20H1 0x15
/* Definitions of getopt */
diff --git a/include/acpi/apei.h b/include/acpi/apei.h
index 680f80960c3d..ece0a8af2bae 100644
--- a/include/acpi/apei.h
+++ b/include/acpi/apei.h
@@ -37,9 +37,6 @@ void __init acpi_hest_init(void);
static inline void acpi_hest_init(void) { return; }
#endif
-typedef int (*apei_hest_func_t)(struct acpi_hest_header *hest_hdr, void *data);
-int apei_hest_parse(apei_hest_func_t func, void *data);
-
int erst_write(const struct cper_record_header *record);
ssize_t erst_get_record_count(void);
int erst_get_record_id_begin(int *pos);
diff --git a/include/acpi/pcc.h b/include/acpi/pcc.h
index 4dec4ed138cd..73e806fe7ce7 100644
--- a/include/acpi/pcc.h
+++ b/include/acpi/pcc.h
@@ -9,18 +9,27 @@
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
+struct pcc_mbox_chan {
+ struct mbox_chan *mchan;
+ u64 shmem_base_addr;
+ u64 shmem_size;
+ u32 latency;
+ u32 max_access_rate;
+ u16 min_turnaround_time;
+};
+
#define MAX_PCC_SUBSPACES 256
#ifdef CONFIG_PCC
-extern struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
- int subspace_id);
-extern void pcc_mbox_free_channel(struct mbox_chan *chan);
+extern struct pcc_mbox_chan *
+pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id);
+extern void pcc_mbox_free_channel(struct pcc_mbox_chan *chan);
#else
-static inline struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
- int subspace_id)
+static inline struct pcc_mbox_chan *
+pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
{
return ERR_PTR(-ENODEV);
}
-static inline void pcc_mbox_free_channel(struct mbox_chan *chan) { }
+static inline void pcc_mbox_free_channel(struct pcc_mbox_chan *chan) { }
#endif
#endif /* _PCC_H */
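The net effect of this hunk is that PCC clients now receive a struct pcc_mbox_chan carrying the subspace's shared-memory parameters alongside the raw mailbox channel. A hedged sketch of the updated client pattern (the function and variable names are hypothetical):

static int example_pcc_attach(struct mbox_client *cl, int subspace_id)
{
	struct pcc_mbox_chan *pchan;
	void __iomem *shmem;

	pchan = pcc_mbox_request_channel(cl, subspace_id);
	if (IS_ERR(pchan))
		return PTR_ERR(pchan);

	/* Shared-memory region parameters now travel with the channel. */
	shmem = ioremap(pchan->shmem_base_addr, pchan->shmem_size);
	if (!shmem) {
		pcc_mbox_free_channel(pchan);
		return -ENOMEM;
	}

	/* pchan->mchan is still the mbox_chan used with mbox_send_message(). */
	return 0;
}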
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index fb172a03a753..33ad282bd338 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -10,21 +10,13 @@
#ifndef __ACGCC_H__
#define __ACGCC_H__
-/*
- * Use compiler specific <stdarg.h> is a good practice for even when
- * -nostdinc is specified (i.e., ACPI_USE_STANDARD_HEADERS undefined.
- */
#ifndef va_arg
-#ifdef ACPI_USE_BUILTIN_STDARG
-typedef __builtin_va_list va_list;
-#define va_start(v, l) __builtin_va_start(v, l)
-#define va_end(v) __builtin_va_end(v)
-#define va_arg(v, l) __builtin_va_arg(v, l)
-#define va_copy(d, s) __builtin_va_copy(d, s)
-#else
+#ifdef __KERNEL__
#include <linux/stdarg.h>
-#endif
-#endif
+#else
+#include <stdarg.h>
+#endif /* __KERNEL__ */
+#endif /* ! va_arg */
#define ACPI_INLINE __inline__
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index 4a674db4e1fa..fedc0dfa4877 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -49,9 +49,15 @@ static inline void flush_cache_page(struct vm_area_struct *vma,
static inline void flush_dcache_page(struct page *page)
{
}
+
+static inline void flush_dcache_folio(struct folio *folio) { }
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
#endif
+#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
+void flush_dcache_folio(struct folio *folio);
+#endif
#ifndef flush_dcache_mmap_lock
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h
index 56348a541c50..8ed6733d5146 100644
--- a/include/asm-generic/hyperv-tlfs.h
+++ b/include/asm-generic/hyperv-tlfs.h
@@ -158,6 +158,7 @@ struct ms_hyperv_tsc_page {
#define HVCALL_RETARGET_INTERRUPT 0x007e
#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af
#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0
+#define HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY 0x00db
/* Extended hypercalls */
#define HV_EXT_CALL_QUERY_CAPABILITIES 0x8001
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
index d3eae6cdbacb..3e2248ac328e 100644
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -35,7 +35,17 @@ struct ms_hyperv_info {
u32 max_vp_index;
u32 max_lp_index;
u32 isolation_config_a;
- u32 isolation_config_b;
+ union {
+ u32 isolation_config_b;
+ struct {
+ u32 cvm_type : 4;
+ u32 reserved1 : 1;
+ u32 shared_gpa_boundary_active : 1;
+ u32 shared_gpa_boundary_bits : 6;
+ u32 reserved2 : 20;
+ };
+ };
+ u64 shared_gpa_boundary;
};
extern struct ms_hyperv_info ms_hyperv;
@@ -44,6 +54,7 @@ extern void __percpu **hyperv_pcpu_output_arg;
extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
+extern bool hv_isolation_type_snp(void);
/* Helper functions that provide a consistent pattern for checking Hyper-V hypercall status. */
static inline int hv_result(u64 status)
@@ -254,12 +265,19 @@ bool hv_is_hyperv_initialized(void);
bool hv_is_hibernation_supported(void);
enum hv_isolation_type hv_get_isolation_type(void);
bool hv_is_isolation_supported(void);
+bool hv_isolation_type_snp(void);
+u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
void hyperv_cleanup(void);
bool hv_query_ext_cap(u64 cap_query);
#else /* CONFIG_HYPERV */
static inline bool hv_is_hyperv_initialized(void) { return false; }
static inline bool hv_is_hibernation_supported(void) { return false; }
static inline void hyperv_cleanup(void) {}
+static inline bool hv_is_isolation_supported(void) { return false; }
+static inline enum hv_isolation_type hv_get_isolation_type(void)
+{
+ return HV_ISOLATION_TYPE_NONE;
+}
#endif /* CONFIG_HYPERV */
#endif
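Because isolation_config_b is now a union over named bitfields, callers can read the confidential-VM properties directly instead of open-coding mask-and-shift. A hedged sketch (the helper name is hypothetical; it assumes ms_hyperv has been populated during platform setup):

static bool example_has_shared_gpa_boundary(void)
{
	if (!hv_is_isolation_supported())
		return false;

	/* The bitfields alias the old raw isolation_config_b word. */
	return ms_hyperv.shared_gpa_boundary_active &&
	       ms_hyperv.shared_gpa_boundary != 0;
}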
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index d16302d3eb59..1dfadb2e878d 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -64,36 +64,6 @@ extern __visible const void __nosave_begin, __nosave_end;
#define dereference_kernel_function_descriptor(p) ((void *)(p))
#endif
-/* random extra sections (if any). Override
- * in asm/sections.h */
-#ifndef arch_is_kernel_text
-static inline int arch_is_kernel_text(unsigned long addr)
-{
- return 0;
-}
-#endif
-
-#ifndef arch_is_kernel_data
-static inline int arch_is_kernel_data(unsigned long addr)
-{
- return 0;
-}
-#endif
-
-/*
- * Check if an address is part of freed initmem. This is needed on architectures
- * with virt == phys kernel mapping, for code that wants to check if an address
- * is part of a static object within [_stext, _end]. After initmem is freed,
- * memory can be allocated from it, and such allocations would then have
- * addresses within the range [_stext, _end].
- */
-#ifndef arch_is_kernel_initmem_freed
-static inline int arch_is_kernel_initmem_freed(unsigned long addr)
-{
- return 0;
-}
-#endif
-
/**
* memory_contains - checks if an object is contained within a memory region
* @begin: virtual address of the beginning of the memory region
@@ -159,6 +129,22 @@ static inline bool init_section_intersects(void *virt, size_t size)
}
/**
+ * is_kernel_core_data - checks if the pointer address is located in the
+ * .data section
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in .data, false otherwise.
+ * Note: On some architectures this may return true for core RODATA and
+ * false on others, but it will always return true for core RW data.
+ */
+static inline bool is_kernel_core_data(unsigned long addr)
+{
+ return addr >= (unsigned long)_sdata &&
+ addr < (unsigned long)_edata;
+}
+
+/**
* is_kernel_rodata - checks if the pointer address is located in the
* .rodata section
*
@@ -172,4 +158,47 @@ static inline bool is_kernel_rodata(unsigned long addr)
addr < (unsigned long)__end_rodata;
}
+/**
+ * is_kernel_inittext - checks if the pointer address is located in the
+ * .init.text section
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in .init.text, false otherwise.
+ */
+static inline bool is_kernel_inittext(unsigned long addr)
+{
+ return addr >= (unsigned long)_sinittext &&
+ addr < (unsigned long)_einittext;
+}
+
+/**
+ * __is_kernel_text - checks if the pointer address is located in the
+ * .text section
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in .text, false otherwise.
+ * Note: an internal helper, it only checks the range of _stext to _etext.
+ */
+static inline bool __is_kernel_text(unsigned long addr)
+{
+ return addr >= (unsigned long)_stext &&
+ addr < (unsigned long)_etext;
+}
+
+/**
+ * __is_kernel - checks if the pointer address is located in the kernel range
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in the kernel range, false otherwise.
+ * Note: an internal helper, it only checks the range of _stext to _end.
+ */
+static inline bool __is_kernel(unsigned long addr)
+{
+ return addr >= (unsigned long)_stext &&
+ addr < (unsigned long)_end;
+}
+
#endif /* _ASM_GENERIC_SECTIONS_H_ */
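These helpers replace ad-hoc range comparisons (and the removed arch_is_kernel_* hooks) with named predicates over the linker-provided symbols. A hedged sketch of how a caller might classify an address with them (the function is illustrative, not part of the patch):

static const char *example_classify_addr(unsigned long addr)
{
	if (__is_kernel_text(addr))
		return "core text";
	if (is_kernel_inittext(addr))
		return "init text";
	if (is_kernel_core_data(addr))
		return "core data";
	if (is_kernel_rodata(addr))
		return "rodata";
	/* anything else inside [_stext, _end) is some other kernel mapping */
	return __is_kernel(addr) ? "kernel (other)" : "not kernel";
}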
diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h
index 524218ae3825..81695eb02a12 100644
--- a/include/asm-generic/syscall.h
+++ b/include/asm-generic/syscall.h
@@ -118,22 +118,6 @@ void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
unsigned long *args);
/**
- * syscall_set_arguments - change system call parameter value
- * @task: task of interest, must be in system call entry tracing
- * @regs: task_pt_regs() of @task
- * @args: array of argument values to store
- *
- * Changes 6 arguments to the system call.
- * The first argument gets value @args[0], and so on.
- *
- * It's only valid to call this when @task is stopped for tracing on
- * entry to a system call, due to %SYSCALL_WORK_SYSCALL_TRACE or
- * %SYSCALL_WORK_SYSCALL_AUDIT.
- */
-void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
- const unsigned long *args);
-
-/**
* syscall_get_arch - return the AUDIT_ARCH for the current system call
* @task: task of interest, must be blocked
*
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index f2984af2b85b..42f3866bca69 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -164,16 +164,22 @@
* Need to also make ftrace_stub_graph point to ftrace_stub
* so that the same stub location may have different protocols
* and not mess up with C verifiers.
+ *
+ * ftrace_ops_list_func will be defined as arch_ftrace_ops_list_func
+ * as some archs will have a different prototype for that function
+ * but ftrace_ops_list_func() will have a single prototype.
*/
#define MCOUNT_REC() . = ALIGN(8); \
__start_mcount_loc = .; \
KEEP(*(__mcount_loc)) \
KEEP(*(__patchable_function_entries)) \
__stop_mcount_loc = .; \
- ftrace_stub_graph = ftrace_stub;
+ ftrace_stub_graph = ftrace_stub; \
+ ftrace_ops_list_func = arch_ftrace_ops_list_func;
#else
# ifdef CONFIG_FUNCTION_TRACER
-# define MCOUNT_REC() ftrace_stub_graph = ftrace_stub;
+# define MCOUNT_REC() ftrace_stub_graph = ftrace_stub; \
+ ftrace_ops_list_func = arch_ftrace_ops_list_func;
# else
# define MCOUNT_REC()
# endif
@@ -470,13 +476,7 @@
__end_pci_fixups_suspend_late = .; \
} \
\
- /* Built-in firmware blobs */ \
- .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \
- __start_builtin_fw = .; \
- KEEP(*(.builtin_fw)) \
- __end_builtin_fw = .; \
- } \
- \
+ FW_LOADER_BUILT_IN_DATA \
TRACEDATA \
\
PRINTK_INDEX \
@@ -869,10 +869,11 @@
KEEP(*(.orc_unwind)) \
__stop_orc_unwind = .; \
} \
+ text_size = _etext - _stext; \
. = ALIGN(4); \
.orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) { \
orc_lookup = .; \
- . += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) / \
+ . += (((text_size + LOOKUP_BLOCK_SIZE - 1) / \
LOOKUP_BLOCK_SIZE) + 1) * 4; \
orc_lookup_end = .; \
}
@@ -880,6 +881,18 @@
#define ORC_UNWIND_TABLE
#endif
+/* Built-in firmware blobs */
+#ifdef CONFIG_FW_LOADER
+#define FW_LOADER_BUILT_IN_DATA \
+ .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \
+ __start_builtin_fw = .; \
+ KEEP(*(.builtin_fw)) \
+ __end_builtin_fw = .; \
+ }
+#else
+#define FW_LOADER_BUILT_IN_DATA
+#endif
+
#ifdef CONFIG_PM_TRACE
#define TRACEDATA \
. = ALIGN(4); \
diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h
index 73c7139c866f..e715bdb720d5 100644
--- a/include/clocksource/arm_arch_timer.h
+++ b/include/clocksource/arm_arch_timer.h
@@ -24,7 +24,7 @@
enum arch_timer_reg {
ARCH_TIMER_REG_CTRL,
- ARCH_TIMER_REG_TVAL,
+ ARCH_TIMER_REG_CVAL,
};
enum arch_timer_ppi_nr {
diff --git a/include/clocksource/timer-riscv.h b/include/clocksource/timer-riscv.h
new file mode 100644
index 000000000000..d7f455754e60
--- /dev/null
+++ b/include/clocksource/timer-riscv.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Atish Patra <atish.patra@wdc.com>
+ */
+
+#ifndef __TIMER_RISCV_H
+#define __TIMER_RISCV_H
+
+#include <linux/types.h>
+
+extern void riscv_cs_get_mult_shift(u32 *mult, u32 *shift);
+
+#endif
diff --git a/include/crypto/engine.h b/include/crypto/engine.h
index 26cac19b0f46..fd4f2fa23f51 100644
--- a/include/crypto/engine.h
+++ b/include/crypto/engine.h
@@ -16,6 +16,7 @@
#include <crypto/akcipher.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
+#include <crypto/kpp.h>
#define ENGINE_NAME_LEN 30
/*
@@ -96,6 +97,8 @@ int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
struct akcipher_request *req);
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
struct ahash_request *req);
+int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
+ struct kpp_request *req);
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
struct skcipher_request *req);
void crypto_finalize_aead_request(struct crypto_engine *engine,
@@ -104,6 +107,8 @@ void crypto_finalize_akcipher_request(struct crypto_engine *engine,
struct akcipher_request *req, int err);
void crypto_finalize_hash_request(struct crypto_engine *engine,
struct ahash_request *req, int err);
+void crypto_finalize_kpp_request(struct crypto_engine *engine,
+ struct kpp_request *req, int err);
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
struct skcipher_request *req, int err);
int crypto_engine_start(struct crypto_engine *engine);
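The new KPP entry points mirror the existing hash/akcipher/skcipher pairs: the algorithm callback hands the request to the engine queue, and the driver's completion path finalizes it. A hedged sketch of that split (my_engine and the my_* functions are hypothetical driver code):

static struct crypto_engine *my_engine;	/* hypothetical, set up at probe */

static int my_kpp_compute_shared_secret(struct kpp_request *req)
{
	/* Queue the request; the engine calls the driver back to run it. */
	return crypto_transfer_kpp_request_to_engine(my_engine, req);
}

static void my_kpp_hw_done(struct kpp_request *req, int err)
{
	/* Called from the driver's completion path (e.g. an IRQ handler). */
	crypto_finalize_kpp_request(my_engine, req, err);
}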
diff --git a/include/crypto/internal/ecc.h b/include/crypto/internal/ecc.h
new file mode 100644
index 000000000000..4f6c1a68882f
--- /dev/null
+++ b/include/crypto/internal/ecc.h
@@ -0,0 +1,281 @@
+/*
+ * Copyright (c) 2013, Kenneth MacKay
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _CRYPTO_ECC_H
+#define _CRYPTO_ECC_H
+
+#include <crypto/ecc_curve.h>
+#include <asm/unaligned.h>
+
+/* One digit is u64 qword. */
+#define ECC_CURVE_NIST_P192_DIGITS 3
+#define ECC_CURVE_NIST_P256_DIGITS 4
+#define ECC_CURVE_NIST_P384_DIGITS 6
+#define ECC_MAX_DIGITS (512 / 64) /* due to ecrdsa */
+
+#define ECC_DIGITS_TO_BYTES_SHIFT 3
+
+#define ECC_MAX_BYTES (ECC_MAX_DIGITS << ECC_DIGITS_TO_BYTES_SHIFT)
+
+#define ECC_POINT_INIT(x, y, ndigits) (struct ecc_point) { x, y, ndigits }
+
+/**
+ * ecc_swap_digits() - Copy ndigits from big endian array to native array
+ * @in: Input array
+ * @out: Output array
+ * @ndigits: Number of digits to copy
+ */
+static inline void ecc_swap_digits(const void *in, u64 *out, unsigned int ndigits)
+{
+ const __be64 *src = (__force __be64 *)in;
+ int i;
+
+ for (i = 0; i < ndigits; i++)
+ out[i] = get_unaligned_be64(&src[ndigits - 1 - i]);
+}
+
+/**
+ * ecc_is_key_valid() - Validate a given ECDH private key
+ *
+ * @curve_id: id representing the curve to use
+ * @ndigits: curve's number of digits
+ * @private_key: private key to be used for the given curve
+ * @private_key_len: private key length
+ *
+ * Returns 0 if the key is acceptable, a negative value otherwise
+ */
+int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
+ const u64 *private_key, unsigned int private_key_len);
+
+/**
+ * ecc_gen_privkey() - Generates an ECC private key.
+ * The private key is a random integer in the range 0 < random < n, where n is a
+ * prime that is the order of the cyclic subgroup generated by the distinguished
+ * point G.
+ * @curve_id: id representing the curve to use
+ * @ndigits: curve number of digits
+ * @privkey: buffer for storing the generated private key
+ *
+ * Returns 0 if the private key was generated successfully, a negative value
+ * if an error occurred.
+ */
+int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey);
+
+/**
+ * ecc_make_pub_key() - Compute an ECC public key
+ *
+ * @curve_id: id representing the curve to use
+ * @ndigits: curve's number of digits
+ * @private_key: pregenerated private key for the given curve
+ * @public_key: buffer for storing the generated public key
+ *
+ * Returns 0 if the public key was generated successfully, a negative value
+ * if an error occurred.
+ */
+int ecc_make_pub_key(const unsigned int curve_id, unsigned int ndigits,
+ const u64 *private_key, u64 *public_key);
+
+/**
+ * crypto_ecdh_shared_secret() - Compute a shared secret
+ *
+ * @curve_id: id representing the curve to use
+ * @ndigits: curve's number of digits
+ * @private_key: private key of part A
+ * @public_key: public key of counterpart B
+ * @secret: buffer for storing the calculated shared secret
+ *
+ * Note: It is recommended that you hash the result of crypto_ecdh_shared_secret
+ * before using it for symmetric encryption or HMAC.
+ *
+ * Returns 0 if the shared secret was generated successfully, a negative value
+ * if an error occurred.
+ */
+int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
+ const u64 *private_key, const u64 *public_key,
+ u64 *secret);
+
+/**
+ * ecc_is_pubkey_valid_partial() - Partial public key validation
+ *
+ * @curve: elliptic curve domain parameters
+ * @pk: public key as a point
+ *
+ * Validate public key according to SP800-56A section 5.6.2.3.4 ECC Partial
+ * Public-Key Validation Routine.
+ *
+ * Note: There is no check that the public key is in the correct elliptic curve
+ * subgroup.
+ *
+ * Return: 0 if validation is successful, -EINVAL if validation fails.
+ */
+int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
+ struct ecc_point *pk);
+
+/**
+ * ecc_is_pubkey_valid_full() - Full public key validation
+ *
+ * @curve: elliptic curve domain parameters
+ * @pk: public key as a point
+ *
+ * Validate public key according to SP800-56A section 5.6.2.3.3 ECC Full
+ * Public-Key Validation Routine.
+ *
+ * Return: 0 if validation is successful, -EINVAL if validation fails.
+ */
+int ecc_is_pubkey_valid_full(const struct ecc_curve *curve,
+ struct ecc_point *pk);
+
+/**
+ * vli_is_zero() - Determine if vli is zero
+ *
+ * @vli: vli to check.
+ * @ndigits: length of the @vli
+ */
+bool vli_is_zero(const u64 *vli, unsigned int ndigits);
+
+/**
+ * vli_cmp() - compare left and right vlis
+ *
+ * @left: vli
+ * @right: vli
+ * @ndigits: length of both vlis
+ *
+ * Returns sign of @left - @right, i.e. -1 if @left < @right,
+ * 0 if @left == @right, 1 if @left > @right.
+ */
+int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits);
+
+/**
+ * vli_sub() - Subtracts right from left
+ *
+ * @result: where to write result
+ * @left: vli
+ * @right: vli
+ * @ndigits: length of all vlis
+ *
+ * Note: can modify in-place.
+ *
+ * Return: carry bit.
+ */
+u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
+ unsigned int ndigits);
+
+/**
+ * vli_from_be64() - Load vli from big-endian u64 array
+ *
+ * @dest: destination vli
+ * @src: source array of u64 BE values
+ * @ndigits: length of both vli and array
+ */
+void vli_from_be64(u64 *dest, const void *src, unsigned int ndigits);
+
+/**
+ * vli_from_le64() - Load vli from little-endian u64 array
+ *
+ * @dest: destination vli
+ * @src: source array of u64 LE values
+ * @ndigits: length of both vli and array
+ */
+void vli_from_le64(u64 *dest, const void *src, unsigned int ndigits);
+
+/**
+ * vli_mod_inv() - Modular inversion
+ *
+ * @result: where to write vli number
+ * @input: vli value to operate on
+ * @mod: modulus
+ * @ndigits: length of all vlis
+ */
+void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
+ unsigned int ndigits);
+
+/**
+ * vli_mod_mult_slow() - Modular multiplication
+ *
+ * @result: where to write result value
+ * @left: vli number to multiply with @right
+ * @right: vli number to multiply with @left
+ * @mod: modulus
+ * @ndigits: length of all vlis
+ *
+ * Note: Assumes that @mod is sufficiently large, e.g. the curve order.
+ */
+void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right,
+ const u64 *mod, unsigned int ndigits);
+
+/**
+ * vli_num_bits() - Counts the number of bits required for vli.
+ *
+ * @vli: vli to check.
+ * @ndigits: Length of the @vli
+ *
+ * Return: The number of bits required to represent @vli.
+ */
+unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits);
+
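As an illustration of how the vli helpers compose, here is a hedged sketch of the 0 < k < n range check that ecc_is_key_valid() performs internally (ndigits = 4; curve_n is assumed to hold the curve order in vli form):

	/* Editorial sketch: accept k only if 0 < k < n. */
	static bool key_in_range(const u64 *k, const u64 *curve_n)
	{
		if (vli_is_zero(k, 4))
			return false;		/* k == 0 */
		if (vli_cmp(curve_n, k, 4) != 1)
			return false;		/* k >= n */
		return true;
	}
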
+/**
+ * ecc_alloc_point() - Allocate ECC point.
+ *
+ * @ndigits: Length of vlis in u64 qwords.
+ *
+ * Return: Pointer to the allocated point or NULL if allocation failed.
+ */
+struct ecc_point *ecc_alloc_point(unsigned int ndigits);
+
+/**
+ * ecc_free_point() - Free ECC point.
+ *
+ * @p: The point to free.
+ */
+void ecc_free_point(struct ecc_point *p);
+
+/**
+ * ecc_point_is_zero() - Check if point is zero.
+ *
+ * @point: Point to check for zero.
+ *
+ * Return: true if point is the point at infinity, false otherwise.
+ */
+bool ecc_point_is_zero(const struct ecc_point *point);
+
+/**
+ * ecc_point_mult_shamir() - Add two points multiplied by scalars
+ *
+ * @result: resulting point
+ * @x: scalar to multiply with @p
+ * @p: point to multiply with @x
+ * @y: scalar to multiply with @q
+ * @q: point to multiply with @y
+ * @curve: curve
+ *
+ * Returns result = x * p + y * q over the curve. This works faster
+ * than two separate multiplications followed by an addition.
+ */
+void ecc_point_mult_shamir(const struct ecc_point *result,
+ const u64 *x, const struct ecc_point *p,
+ const u64 *y, const struct ecc_point *q,
+ const struct ecc_curve *curve);
+
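The Shamir combination above is exactly what an ECDSA verifier needs to compute u1 * G + u2 * Q in a single pass. A hedged fragment (res, u1, u2, pub and curve are assumed to be set up by the caller; &curve->g is the curve's generator point):

	/* Editorial sketch of an ECDSA-verify step. */
	ecc_point_mult_shamir(res, u1, &curve->g, u2, pub, curve);
	if (ecc_point_is_zero(res))
		return -EKEYREJECTED;	/* point at infinity */
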
+#endif
diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h
index 0f66a0d9f06d..90b69270f2fa 100644
--- a/include/drm/amd_asic_type.h
+++ b/include/drm/amd_asic_type.h
@@ -62,6 +62,7 @@ enum amd_asic_type {
CHIP_DIMGREY_CAVEFISH, /* 33 */
CHIP_BEIGE_GOBY, /* 34 */
CHIP_YELLOW_CARP, /* 35 */
+ CHIP_IP_DISCOVERY, /* 36 */
CHIP_LAST,
};
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 46bdfa48c413..061d87313fac 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -790,11 +790,19 @@ drm_priv_to_bridge(struct drm_private_obj *priv)
void drm_bridge_add(struct drm_bridge *bridge);
void drm_bridge_remove(struct drm_bridge *bridge);
-struct drm_bridge *of_drm_find_bridge(struct device_node *np);
int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
struct drm_bridge *previous,
enum drm_bridge_attach_flags flags);
+#ifdef CONFIG_OF
+struct drm_bridge *of_drm_find_bridge(struct device_node *np);
+#else
+static inline struct drm_bridge *of_drm_find_bridge(struct device_node *np)
+{
+ return NULL;
+}
+#endif
+
/**
* drm_bridge_get_next_bridge() - Get the next bridge in the chain
* @bridge: bridge object
@@ -914,4 +922,17 @@ struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev,
struct drm_connector *drm_panel_bridge_connector(struct drm_bridge *bridge);
#endif
+#if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL_BRIDGE)
+struct drm_bridge *devm_drm_of_get_bridge(struct device *dev, struct device_node *node,
+ u32 port, u32 endpoint);
+#else
+static inline struct drm_bridge *devm_drm_of_get_bridge(struct device *dev,
+ struct device_node *node,
+ u32 port,
+ u32 endpoint)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
+
#endif
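With the CONFIG_OF stubs above, a driver can call devm_drm_of_get_bridge() unconditionally. A minimal probe-time sketch (port 0 / endpoint 0 is an assumption about the caller's DT layout):

	/* Editorial sketch: find the downstream bridge and chain it. */
	bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
	if (IS_ERR(bridge))
		return PTR_ERR(bridge);	/* -ENODEV when !CONFIG_OF */

	ret = drm_bridge_attach(encoder, bridge, NULL, 0);
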
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 1647960c9e50..379746d3266f 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -590,6 +590,18 @@ struct drm_display_info {
* @monitor_range: Frequency range supported by monitor range descriptor
*/
struct drm_monitor_range_info monitor_range;
+
+ /**
+ * @mso_stream_count: eDP Multi-SST Operation (MSO) stream count from
+ * the DisplayID VESA vendor block. 0 for conventional Single-Stream
+ * Transport (SST), or 2 or 4 MSO streams.
+ */
+ u8 mso_stream_count;
+
+ /**
+ * @mso_pixel_overlap: eDP MSO segment pixel overlap, 0-8 pixels.
+ */
+ u8 mso_pixel_overlap;
};
int drm_display_info_set_bus_formats(struct drm_display_info *info,
@@ -1084,6 +1096,14 @@ struct drm_connector_funcs {
*/
void (*atomic_print_state)(struct drm_printer *p,
const struct drm_connector_state *state);
+
+ /**
+ * @oob_hotplug_event:
+ *
+ * This will get called when a hotplug-event for a drm-connector
+ * has been received from a source outside the display driver / device.
+ */
+ void (*oob_hotplug_event)(struct drm_connector *connector);
};
/**
@@ -1228,6 +1248,14 @@ struct drm_connector {
struct device *kdev;
/** @attr: sysfs attributes */
struct device_attribute *attr;
+ /**
+ * @fwnode: associated fwnode supplied by platform firmware
+ *
+ * Drivers can set this to associate a fwnode with a connector; drivers
+ * are expected to get a reference on the fwnode when setting this.
+ * drm_connector_cleanup() will call fwnode_handle_put() on this.
+ */
+ struct fwnode_handle *fwnode;
/**
* @head:
@@ -1239,6 +1267,14 @@ struct drm_connector {
*/
struct list_head head;
+ /**
+ * @global_connector_list_entry:
+ *
+ * Connector entry in the global connector-list, used by
+ * drm_connector_find_by_fwnode().
+ */
+ struct list_head global_connector_list_entry;
+
/** @base: base KMS object */
struct drm_mode_object base;
@@ -1650,6 +1686,7 @@ drm_connector_is_unregistered(struct drm_connector *connector)
DRM_CONNECTOR_UNREGISTERED;
}
+void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode);
const char *drm_get_connector_type_name(unsigned int connector_type);
const char *drm_get_connector_status_name(enum drm_connector_status status);
const char *drm_get_subpixel_order_name(enum subpixel_order order);
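The fwnode association and @oob_hotplug_event hook let code outside the display driver deliver hotplug notifications. A hedged sketch of the non-DRM side, e.g. a USB Type-C DisplayPort altmode handler (port_fwnode is an assumption about the caller):

	#include <drm/drm_connector.h>

	/* Editorial sketch: forward an out-of-band HPD change. */
	static void dp_altmode_hpd_changed(struct fwnode_handle *port_fwnode)
	{
		drm_connector_oob_hotplug_event(port_fwnode);
	}
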
diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h
index ec64d141f578..7ffbd9f7bfc7 100644
--- a/include/drm/drm_displayid.h
+++ b/include/drm/drm_displayid.h
@@ -23,38 +23,71 @@
#define DRM_DISPLAYID_H
#include <linux/types.h>
+#include <linux/bits.h>
struct edid;
-#define DATA_BLOCK_PRODUCT_ID 0x00
-#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01
-#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02
-#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03
-#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04
-#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05
-#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06
-#define DATA_BLOCK_VESA_TIMING 0x07
-#define DATA_BLOCK_CEA_TIMING 0x08
-#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09
-#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a
-#define DATA_BLOCK_GP_ASCII_STRING 0x0b
-#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c
-#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d
-#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e
-#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f
-#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10
-#define DATA_BLOCK_TILED_DISPLAY 0x12
-#define DATA_BLOCK_CTA 0x81
-
-#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f
-
-#define PRODUCT_TYPE_EXTENSION 0
-#define PRODUCT_TYPE_TEST 1
-#define PRODUCT_TYPE_PANEL 2
-#define PRODUCT_TYPE_MONITOR 3
-#define PRODUCT_TYPE_TV 4
-#define PRODUCT_TYPE_REPEATER 5
-#define PRODUCT_TYPE_DIRECT_DRIVE 6
+#define VESA_IEEE_OUI 0x3a0292
+
+/* DisplayID Structure versions */
+#define DISPLAY_ID_STRUCTURE_VER_12 0x12
+#define DISPLAY_ID_STRUCTURE_VER_20 0x20
+
+/* DisplayID Structure v1r2 Data Blocks */
+#define DATA_BLOCK_PRODUCT_ID 0x00
+#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01
+#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02
+#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03
+#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04
+#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05
+#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06
+#define DATA_BLOCK_VESA_TIMING 0x07
+#define DATA_BLOCK_CEA_TIMING 0x08
+#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09
+#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a
+#define DATA_BLOCK_GP_ASCII_STRING 0x0b
+#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c
+#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d
+#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e
+#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f
+#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10
+#define DATA_BLOCK_TILED_DISPLAY 0x12
+#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f
+#define DATA_BLOCK_CTA 0x81
+
+/* DisplayID Structure v2r0 Data Blocks */
+#define DATA_BLOCK_2_PRODUCT_ID 0x20
+#define DATA_BLOCK_2_DISPLAY_PARAMETERS 0x21
+#define DATA_BLOCK_2_TYPE_7_DETAILED_TIMING 0x22
+#define DATA_BLOCK_2_TYPE_8_ENUMERATED_TIMING 0x23
+#define DATA_BLOCK_2_TYPE_9_FORMULA_TIMING 0x24
+#define DATA_BLOCK_2_DYNAMIC_VIDEO_TIMING 0x25
+#define DATA_BLOCK_2_DISPLAY_INTERFACE_FEATURES 0x26
+#define DATA_BLOCK_2_STEREO_DISPLAY_INTERFACE 0x27
+#define DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY 0x28
+#define DATA_BLOCK_2_CONTAINER_ID 0x29
+#define DATA_BLOCK_2_VENDOR_SPECIFIC 0x7e
+#define DATA_BLOCK_2_CTA_DISPLAY_ID 0x81
+
+/* DisplayID Structure v1r2 Product Type */
+#define PRODUCT_TYPE_EXTENSION 0
+#define PRODUCT_TYPE_TEST 1
+#define PRODUCT_TYPE_PANEL 2
+#define PRODUCT_TYPE_MONITOR 3
+#define PRODUCT_TYPE_TV 4
+#define PRODUCT_TYPE_REPEATER 5
+#define PRODUCT_TYPE_DIRECT_DRIVE 6
+
+/* DisplayID Structure v2r0 Display Product Primary Use Case (~Product Type) */
+#define PRIMARY_USE_EXTENSION 0
+#define PRIMARY_USE_TEST 1
+#define PRIMARY_USE_GENERIC 2
+#define PRIMARY_USE_TV 3
+#define PRIMARY_USE_DESKTOP_PRODUCTIVITY 4
+#define PRIMARY_USE_DESKTOP_GAMING 5
+#define PRIMARY_USE_PRESENTATION 6
+#define PRIMARY_USE_HEAD_MOUNTED_VR 7
+#define PRIMARY_USE_HEAD_MOUNTED_AR 8
struct displayid_header {
u8 rev;
@@ -96,6 +129,16 @@ struct displayid_detailed_timing_block {
struct displayid_detailed_timings_1 timings[];
};
+#define DISPLAYID_VESA_MSO_OVERLAP GENMASK(3, 0)
+#define DISPLAYID_VESA_MSO_MODE GENMASK(6, 5)
+
+struct displayid_vesa_vendor_specific_block {
+ struct displayid_block base;
+ u8 oui[3];
+ u8 data_structure_type;
+ u8 mso;
+} __packed;
+
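The two GENMASK fields above pack into the single mso byte. A hedged sketch of how a DisplayID parser might unpack them (FIELD_GET() from <linux/bitfield.h>; a mode of 0 means plain SST, non-zero selects 2 or 4 MSO streams):

	/* Editorial sketch: vesa points at a parsed vendor block. */
	u8 mode = FIELD_GET(DISPLAYID_VESA_MSO_MODE, vesa->mso);
	u8 overlap = FIELD_GET(DISPLAYID_VESA_MSO_OVERLAP, vesa->mso);
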
/* DisplayID iteration */
struct displayid_iter {
const struct edid *edid;
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 1d5b3dbb6e56..b52df4db3e8f 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -453,6 +453,7 @@ struct drm_panel;
# define DP_FEC_UNCORR_BLK_ERROR_COUNT_CAP (1 << 1)
# define DP_FEC_CORR_BLK_ERROR_COUNT_CAP (1 << 2)
# define DP_FEC_BIT_ERROR_COUNT_CAP (1 << 3)
+#define DP_FEC_CAPABILITY_1 0x091 /* 2.0 */
/* DP-HDMI2.1 PCON DSC ENCODER SUPPORT */
#define DP_PCON_DSC_ENCODER_CAP_SIZE 0xC /* 0x9E - 0x92 */
@@ -537,6 +538,9 @@ struct drm_panel;
#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 0x0a1
#define DP_DSC_BRANCH_MAX_LINE_WIDTH 0x0a2
+/* DFP Capability Extension */
+#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0a3 /* 2.0 */
+
/* Link Configuration */
#define DP_LINK_BW_SET 0x100
# define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */
@@ -688,6 +692,7 @@ struct drm_panel;
#define DP_DSC_ENABLE 0x160 /* DP 1.4 */
# define DP_DECOMPRESSION_EN (1 << 0)
+#define DP_DSC_CONFIGURATION 0x161 /* DP 2.0 */
#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */
# define DP_PSR_ENABLE BIT(0)
@@ -743,6 +748,7 @@ struct drm_panel;
# define DP_RECEIVE_PORT_0_STATUS (1 << 0)
# define DP_RECEIVE_PORT_1_STATUS (1 << 1)
# define DP_STREAM_REGENERATION_STATUS (1 << 2) /* 2.0 */
+# define DP_INTRA_HOP_AUX_REPLY_INDICATION (1 << 3) /* 2.0 */
#define DP_ADJUST_REQUEST_LANE0_1 0x206
#define DP_ADJUST_REQUEST_LANE2_3 0x207
@@ -865,6 +871,8 @@ struct drm_panel;
# define DP_PHY_TEST_PATTERN_80BIT_CUSTOM 0x4
# define DP_PHY_TEST_PATTERN_CP2520 0x5
+#define DP_PHY_SQUARE_PATTERN 0x249
+
#define DP_TEST_HBR2_SCRAMBLER_RESET 0x24A
#define DP_TEST_80BIT_CUSTOM_PATTERN_7_0 0x250
#define DP_TEST_80BIT_CUSTOM_PATTERN_15_8 0x251
@@ -1109,6 +1117,18 @@ struct drm_panel;
#define DP_128B132B_TRAINING_AUX_RD_INTERVAL 0x2216 /* 2.0 */
# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK 0x7f
+#define DP_TEST_264BIT_CUSTOM_PATTERN_7_0 0x2230
+#define DP_TEST_264BIT_CUSTOM_PATTERN_263_256 0x2250
+
+/* DSC Extended Capability Branch Total DSC Resources */
+#define DP_DSC_SUPPORT_AND_DSC_DECODER_COUNT 0x2260 /* 2.0 */
+# define DP_DSC_DECODER_COUNT_MASK (0b111 << 5)
+# define DP_DSC_DECODER_COUNT_SHIFT 5
+#define DP_DSC_MAX_SLICE_COUNT_AND_AGGREGATION_0 0x2270 /* 2.0 */
+# define DP_DSC_DECODER_0_MAXIMUM_SLICE_COUNT_MASK (1 << 0)
+# define DP_DSC_DECODER_0_AGGREGATION_SUPPORT_MASK (0b111 << 1)
+# define DP_DSC_DECODER_0_AGGREGATION_SUPPORT_SHIFT 1
+
/* Protocol Converter Extension */
/* HDMI CEC tunneling over AUX DP 1.3 section 5.3.3.3.1 DPCD 1.4+ */
#define DP_CEC_TUNNELING_CAPABILITY 0x3000
@@ -1319,6 +1339,10 @@ struct drm_panel;
#define DP_MAX_LANE_COUNT_PHY_REPEATER 0xf0004 /* 1.4a */
#define DP_Repeater_FEC_CAPABILITY 0xf0004 /* 1.4 */
#define DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT 0xf0005 /* 1.4a */
+#define DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER 0xf0006 /* 2.0 */
+# define DP_PHY_REPEATER_128B132B_SUPPORTED (1 << 0)
+/* See DP_128B132B_SUPPORTED_LINK_RATES for values */
+#define DP_PHY_REPEATER_128B132B_RATES 0xf0007 /* 2.0 */
enum drm_dp_phy {
DP_PHY_DPRX,
@@ -1490,6 +1514,8 @@ u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane);
u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane);
+u8 drm_dp_get_adjust_tx_ffe_preset(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane);
u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE],
unsigned int lane);
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index ddb9231d0309..78044ac5b59b 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -554,6 +554,8 @@ struct drm_dp_mst_topology_state {
struct drm_private_state base;
struct list_head vcpis;
struct drm_dp_mst_topology_mgr *mgr;
+ u8 total_avail_slots;
+ u8 start_slot;
};
#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)
@@ -806,6 +808,7 @@ int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap);
void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port);
@@ -815,7 +818,7 @@ int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
int pbn);
-int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr);
+int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr, int start_slot);
int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr);
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index deccfd39e6db..18f6c700f6d0 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -508,6 +508,52 @@ static inline u8 drm_eld_get_conn_type(const uint8_t *eld)
return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK;
}
+/**
+ * drm_edid_encode_panel_id - Encode an ID for matching against drm_edid_get_panel_id()
+ * @vend_chr_0: First character of the vendor string.
+ * @vend_chr_1: Second character of the vendor string.
+ * @vend_chr_2: Third character of the vendor string.
+ * @product_id: The 16-bit product ID.
+ *
+ * This is a macro so that it can be calculated at compile time and used
+ * as an initializer.
+ *
+ * For instance:
+ * drm_edid_encode_panel_id('B', 'O', 'E', 0x2d08) => 0x09e52d08
+ *
+ * Return: a 32-bit ID per panel.
+ */
+#define drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, product_id) \
+ ((((u32)(vend_chr_0) - '@') & 0x1f) << 26 | \
+ (((u32)(vend_chr_1) - '@') & 0x1f) << 21 | \
+ (((u32)(vend_chr_2) - '@') & 0x1f) << 16 | \
+ ((product_id) & 0xffff))
+
+/**
+ * drm_edid_decode_panel_id - Decode a panel ID from drm_edid_encode_panel_id()
+ * @panel_id: The panel ID to decode.
+ * @vend: A 4-byte buffer to store the 3-letter vendor string plus a '\0'
+ * termination
+ * @product_id: The product ID will be returned here.
+ *
+ * For instance, after:
+ * drm_edid_decode_panel_id(0x09e52d08, vend, &product_id)
+ * These will be true:
+ * vend[0] = 'B'
+ * vend[1] = 'O'
+ * vend[2] = 'E'
+ * vend[3] = '\0'
+ * product_id = 0x2d08
+ */
+static inline void drm_edid_decode_panel_id(u32 panel_id, char vend[4], u16 *product_id)
+{
+ *product_id = (u16)(panel_id & 0xffff);
+ vend[0] = '@' + ((panel_id >> 26) & 0x1f);
+ vend[1] = '@' + ((panel_id >> 21) & 0x1f);
+ vend[2] = '@' + ((panel_id >> 16) & 0x1f);
+ vend[3] = '\0';
+}
+
bool drm_probe_ddc(struct i2c_adapter *adapter);
struct edid *drm_do_get_edid(struct drm_connector *connector,
int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
@@ -515,6 +561,7 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
void *data);
struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter);
+u32 drm_edid_get_panel_id(struct i2c_adapter *adapter);
struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
struct i2c_adapter *adapter);
struct edid *drm_edid_duplicate(const struct edid *edid);
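drm_edid_get_panel_id() pairs with drm_edid_encode_panel_id() for quirk matching from the EDID base block alone, before a full EDID read. A hedged sketch (the table entry is invented):

	/* Editorial sketch: match a panel against a quirk list. */
	static const u32 quirky_panels[] = {
		drm_edid_encode_panel_id('B', 'O', 'E', 0x2d08),
	};

	u32 panel_id = drm_edid_get_panel_id(adapter);
	/* ... compare panel_id against quirky_panels[] ... */
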
diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
index 4e0258a61311..e86925cf07b9 100644
--- a/include/drm/drm_format_helper.h
+++ b/include/drm/drm_format_helper.h
@@ -16,12 +16,16 @@ void drm_fb_memcpy_dstclip(void __iomem *dst, unsigned int dst_pitch, void *vadd
struct drm_rect *clip);
void drm_fb_swab(void *dst, void *src, struct drm_framebuffer *fb,
struct drm_rect *clip, bool cached);
+void drm_fb_xrgb8888_to_rgb332(void *dst, void *vaddr, struct drm_framebuffer *fb,
+ struct drm_rect *clip);
void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr,
struct drm_framebuffer *fb,
struct drm_rect *clip, bool swab);
void drm_fb_xrgb8888_to_rgb565_dstclip(void __iomem *dst, unsigned int dst_pitch,
void *vaddr, struct drm_framebuffer *fb,
struct drm_rect *clip, bool swab);
+void drm_fb_xrgb8888_to_rgb888(void *dst, void *src, struct drm_framebuffer *fb,
+ struct drm_rect *clip);
void drm_fb_xrgb8888_to_rgb888_dstclip(void __iomem *dst, unsigned int dst_pitch,
void *vaddr, struct drm_framebuffer *fb,
struct drm_rect *clip);
diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h
index afb27cb6a7bd..6ed61c371f6c 100644
--- a/include/drm/drm_ioctl.h
+++ b/include/drm/drm_ioctl.h
@@ -167,7 +167,6 @@ struct drm_ioctl_desc {
.name = #ioctl \
}
-int drm_ioctl_permit(u32 flags, struct drm_file *file_priv);
long drm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long drm_ioctl_kernel(struct file *, drm_ioctl_t, void *, u32);
#ifdef CONFIG_COMPAT
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index af7ba8071eb0..147e51b6d241 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -227,9 +227,13 @@ struct mipi_dsi_device *
mipi_dsi_device_register_full(struct mipi_dsi_host *host,
const struct mipi_dsi_device_info *info);
void mipi_dsi_device_unregister(struct mipi_dsi_device *dsi);
+struct mipi_dsi_device *
+devm_mipi_dsi_device_register_full(struct device *dev, struct mipi_dsi_host *host,
+ const struct mipi_dsi_device_info *info);
struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np);
int mipi_dsi_attach(struct mipi_dsi_device *dsi);
int mipi_dsi_detach(struct mipi_dsi_device *dsi);
+int devm_mipi_dsi_attach(struct device *dev, struct mipi_dsi_device *dsi);
int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi);
int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi);
int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
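The devm_* variants above let host and bridge drivers drop their explicit detach/unregister error paths. A minimal probe sketch (the device name in info is invented):

	/* Editorial sketch: device-managed DSI registration + attach. */
	const struct mipi_dsi_device_info info = {
		.type = "panel-xyz",
		.channel = 0,
		.node = NULL,
	};

	dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
	if (IS_ERR(dsi))
		return PTR_ERR(dsi);

	return devm_mipi_dsi_attach(dev, dsi);
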
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 1ddf7783fdf7..48b7de80daf5 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -103,14 +103,13 @@ struct drm_mode_config_funcs {
* Callback used by helpers to inform the driver of output configuration
* changes.
*
- * Drivers implementing fbdev emulation with the helpers can call
- * drm_fb_helper_hotplug_changed from this hook to inform the fbdev
- * helper of output changes.
+ * Drivers implementing fbdev emulation use drm_kms_helper_hotplug_event()
+ * to call this hook to inform the fbdev helper of output changes.
*
- * FIXME:
- *
- * Except that there's no vtable for device-level helper callbacks
- * there's no reason this is a core function.
+ * This hook is deprecated, drivers should instead use
+ * drm_fbdev_generic_setup() which takes care of any necessary
+ * hotplug event forwarding already without further involvement by
+ * the driver.
*/
void (*output_poll_changed)(struct drm_device *dev);
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index aafd07388eb7..b84693fbd2b5 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -24,6 +24,8 @@
#ifndef DRM_MODESET_LOCK_H_
#define DRM_MODESET_LOCK_H_
+#include <linux/types.h> /* stackdepot.h is not self-contained */
+#include <linux/stackdepot.h>
#include <linux/ww_mutex.h>
struct drm_modeset_lock;
@@ -52,6 +54,12 @@ struct drm_modeset_acquire_ctx {
struct drm_modeset_lock *contended;
/*
+ * Stack depot for debugging when a contended lock was not backed off
+ * from.
+ */
+ depot_stack_handle_t stack_depot;
+
+ /*
* list of held locks (drm_modeset_lock)
*/
struct list_head locked;
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index fed97e35626f..0c1102dc4d88 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -43,7 +43,7 @@ enum drm_scaling_filter {
/**
* struct drm_plane_state - mutable plane state
*
- * Please not that the destination coordinates @crtc_x, @crtc_y, @crtc_h and
+ * Please note that the destination coordinates @crtc_x, @crtc_y, @crtc_h and
* @crtc_w and the source coordinates @src_x, @src_y, @src_h and @src_w are the
* raw coordinates provided by userspace. Drivers should use
* drm_atomic_helper_check_plane_state() and only use the derived rectangles in
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index 15a089a87c22..22fabdeed297 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -340,6 +340,8 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
/**
* DRM_DEV_ERROR() - Error output.
*
+ * NOTE: this is deprecated in favor of drm_err() or dev_err().
+ *
* @dev: device pointer
* @fmt: printf() like format string.
*/
@@ -349,6 +351,9 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
/**
* DRM_DEV_ERROR_RATELIMITED() - Rate limited error output.
*
+ * NOTE: this is deprecated in favor of drm_err_ratelimited() or
+ * dev_err_ratelimited().
+ *
* @dev: device pointer
* @fmt: printf() like format string.
*
@@ -364,9 +369,11 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__); \
})
+/* NOTE: this is deprecated in favor of drm_info() or dev_info(). */
#define DRM_DEV_INFO(dev, fmt, ...) \
drm_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_info_once() or dev_info_once(). */
#define DRM_DEV_INFO_ONCE(dev, fmt, ...) \
({ \
static bool __print_once __read_mostly; \
@@ -379,6 +386,8 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
/**
* DRM_DEV_DEBUG() - Debug output for generic drm code
*
+ * NOTE: this is deprecated in favor of drm_dbg_core().
+ *
* @dev: device pointer
* @fmt: printf() like format string.
*/
@@ -387,6 +396,8 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
/**
* DRM_DEV_DEBUG_DRIVER() - Debug output for vendor specific part of the driver
*
+ * NOTE: this is deprecated in favor of drm_dbg() or dev_dbg().
+ *
* @dev: device pointer
* @fmt: printf() like format string.
*/
@@ -395,6 +406,8 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
/**
* DRM_DEV_DEBUG_KMS() - Debug output for modesetting code
*
+ * NOTE: this is deprecated in favor of drm_dbg_kms().
+ *
* @dev: device pointer
* @fmt: printf() like format string.
*/
@@ -480,47 +493,63 @@ void __drm_err(const char *format, ...);
#define _DRM_PRINTK(once, level, fmt, ...) \
printk##once(KERN_##level "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_info(). */
#define DRM_INFO(fmt, ...) \
_DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_notice(). */
#define DRM_NOTE(fmt, ...) \
_DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_warn(). */
#define DRM_WARN(fmt, ...) \
_DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_info_once(). */
#define DRM_INFO_ONCE(fmt, ...) \
_DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_notice_once(). */
#define DRM_NOTE_ONCE(fmt, ...) \
_DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_warn_once(). */
#define DRM_WARN_ONCE(fmt, ...) \
_DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_err(). */
#define DRM_ERROR(fmt, ...) \
__drm_err(fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_err_ratelimited(). */
#define DRM_ERROR_RATELIMITED(fmt, ...) \
DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_core(NULL, ...). */
#define DRM_DEBUG(fmt, ...) \
__drm_dbg(DRM_UT_CORE, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg(NULL, ...). */
#define DRM_DEBUG_DRIVER(fmt, ...) \
__drm_dbg(DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_kms(NULL, ...). */
#define DRM_DEBUG_KMS(fmt, ...) \
__drm_dbg(DRM_UT_KMS, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_prime(NULL, ...). */
#define DRM_DEBUG_PRIME(fmt, ...) \
__drm_dbg(DRM_UT_PRIME, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_atomic(NULL, ...). */
#define DRM_DEBUG_ATOMIC(fmt, ...) \
__drm_dbg(DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_vbl(NULL, ...). */
#define DRM_DEBUG_VBL(fmt, ...) \
__drm_dbg(DRM_UT_VBL, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_lease(NULL, ...). */
#define DRM_DEBUG_LEASE(fmt, ...) \
__drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_dp(NULL, ...). */
#define DRM_DEBUG_DP(fmt, ...) \
__drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__)
@@ -536,6 +565,7 @@ void __drm_err(const char *format, ...);
#define drm_dbg_kms_ratelimited(drm, fmt, ...) \
__DRM_DEFINE_DBG_RATELIMITED(KMS, drm, fmt, ## __VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_kms_ratelimited(NULL, ...). */
#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) drm_dbg_kms_ratelimited(NULL, fmt, ## __VA_ARGS__)
/*
diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h
index 8d3ed2834d34..04c57564c397 100644
--- a/include/drm/drm_probe_helper.h
+++ b/include/drm/drm_probe_helper.h
@@ -18,6 +18,7 @@ int drm_helper_probe_detect(struct drm_connector *connector,
void drm_kms_helper_poll_init(struct drm_device *dev);
void drm_kms_helper_poll_fini(struct drm_device *dev);
bool drm_helper_hpd_irq_event(struct drm_device *dev);
+bool drm_connector_helper_hpd_irq_event(struct drm_connector *connector);
void drm_kms_helper_hotplug_event(struct drm_device *dev);
void drm_kms_helper_poll_disable(struct drm_device *dev);
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 88ae7f331bb1..f011e4c407f2 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -27,9 +27,12 @@
#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>
+#include <linux/xarray.h>
#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
+struct drm_gem_object;
+
struct drm_gpu_scheduler;
struct drm_sched_rq;
@@ -50,56 +53,147 @@ enum drm_sched_priority {
* struct drm_sched_entity - A wrapper around a job queue (typically
* attached to the DRM file_priv).
*
- * @list: used to append this struct to the list of entities in the
- * runqueue.
- * @rq: runqueue on which this entity is currently scheduled.
- * @sched_list: A list of schedulers (drm_gpu_schedulers).
- * Jobs from this entity can be scheduled on any scheduler
- * on this list.
- * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
- * @priority: priority of the entity
- * @rq_lock: lock to modify the runqueue to which this entity belongs.
- * @job_queue: the list of jobs of this entity.
- * @fence_seq: a linearly increasing seqno incremented with each
- * new &drm_sched_fence which is part of the entity.
- * @fence_context: a unique context for all the fences which belong
- * to this entity.
- * The &drm_sched_fence.scheduled uses the
- * fence_context but &drm_sched_fence.finished uses
- * fence_context + 1.
- * @dependency: the dependency fence of the job which is on the top
- * of the job queue.
- * @cb: callback for the dependency fence above.
- * @guilty: points to ctx's guilty.
- * @fini_status: contains the exit status in case the process was signalled.
- * @last_scheduled: points to the finished fence of the last scheduled job.
- * @last_user: last group leader pushing a job into the entity.
- * @stopped: Marks the enity as removed from rq and destined for termination.
- * @entity_idle: Signals when enityt is not in use
- *
* Entities will emit jobs in order to their corresponding hardware
* ring, and the scheduler will alternate between entities based on
* scheduling policy.
*/
struct drm_sched_entity {
+ /**
+ * @list:
+ *
+ * Used to append this struct to the list of entities in the runqueue
+ * @rq under &drm_sched_rq.entities.
+ *
+ * Protected by &drm_sched_rq.lock of @rq.
+ */
struct list_head list;
+
+ /**
+ * @rq:
+ *
+ * Runqueue on which this entity is currently scheduled.
+ *
+ * FIXME: Locking is very unclear for this. Writers are protected by
+ * @rq_lock, but readers are generally lockless and seem to just race
+ * with not even a READ_ONCE.
+ */
struct drm_sched_rq *rq;
+
+ /**
+ * @sched_list:
+ *
+ * A list of schedulers (struct drm_gpu_scheduler). Jobs from this entity can
+ * be scheduled on any scheduler on this list.
+ *
+ * This can be modified by calling drm_sched_entity_modify_sched().
+ * Locking is entirely up to the driver, see the above function for more
+ * details.
+ *
+ * This will be set to NULL if &num_sched_list equals 1 and @rq has been
+ * set already.
+ *
+ * FIXME: This means priority changes through
+ * drm_sched_entity_set_priority() will be lost henceforth in this case.
+ */
struct drm_gpu_scheduler **sched_list;
+
+ /**
+ * @num_sched_list:
+ *
+ * Number of drm_gpu_schedulers in the @sched_list.
+ */
unsigned int num_sched_list;
+
+ /**
+ * @priority:
+ *
+ * Priority of the entity. This can be modified by calling
+ * drm_sched_entity_set_priority(). Protected by &rq_lock.
+ */
enum drm_sched_priority priority;
+
+ /**
+ * @rq_lock:
+ *
+ * Lock to modify the runqueue to which this entity belongs.
+ */
spinlock_t rq_lock;
+ /**
+ * @job_queue: the list of jobs of this entity.
+ */
struct spsc_queue job_queue;
+ /**
+ * @fence_seq:
+ *
+ * A linearly increasing seqno incremented with each new
+ * &drm_sched_fence which is part of the entity.
+ *
+ * FIXME: Callers of drm_sched_job_arm() need to ensure correct locking,
+ * this doesn't need to be atomic.
+ */
atomic_t fence_seq;
+
+ /**
+ * @fence_context:
+ *
+ * A unique context for all the fences which belong to this entity. The
+ * &drm_sched_fence.scheduled uses the fence_context but
+ * &drm_sched_fence.finished uses fence_context + 1.
+ */
uint64_t fence_context;
+ /**
+ * @dependency:
+ *
+ * The dependency fence of the job which is on the top of the job queue.
+ */
struct dma_fence *dependency;
+
+ /**
+ * @cb:
+ *
+ * Callback for the dependency fence above.
+ */
struct dma_fence_cb cb;
+
+ /**
+ * @guilty:
+ *
+ * Points to the context's guilty flag.
+ */
atomic_t *guilty;
+
+ /**
+ * @last_scheduled:
+ *
+ * Points to the finished fence of the last scheduled job. Only written
+ * by the scheduler thread, can be accessed locklessly from
+ * drm_sched_job_arm() iff the queue is empty.
+ */
struct dma_fence *last_scheduled;
+
+ /**
+ * @last_user: last group leader pushing a job into the entity.
+ */
struct task_struct *last_user;
+
+ /**
+ * @stopped:
+ *
+ * Marks the entity as removed from rq and destined for
+ * termination. This is set by calling drm_sched_entity_flush() and by
+ * drm_sched_fini().
+ */
bool stopped;
+
+ /**
+ * @entity_idle:
+ *
+ * Signals when entity is not in use, used to sequence entity cleanup in
+ * drm_sched_entity_fini().
+ */
struct completion entity_idle;
};
@@ -198,6 +292,17 @@ struct drm_sched_job {
enum drm_sched_priority s_priority;
struct drm_sched_entity *entity;
struct dma_fence_cb cb;
+ /**
+ * @dependencies:
+ *
+ * Contains the dependencies as struct dma_fence for this job, see
+ * drm_sched_job_add_dependency() and
+ * drm_sched_job_add_implicit_dependencies().
+ */
+ struct xarray dependencies;
+
+ /** @last_dependency: tracks @dependencies as they signal */
+ unsigned long last_dependency;
};
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
@@ -220,9 +325,15 @@ enum drm_gpu_sched_stat {
*/
struct drm_sched_backend_ops {
/**
- * @dependency: Called when the scheduler is considering scheduling
- * this job next, to get another struct dma_fence for this job to
- * block on. Once it returns NULL, run_job() may be called.
+ * @dependency:
+ *
+ * Called when the scheduler is considering scheduling this job next, to
+ * get another struct dma_fence for this job to block on. Once it
+ * returns NULL, run_job() may be called.
+ *
+ * If a driver exclusively uses drm_sched_job_add_dependency() and
+ * drm_sched_job_add_implicit_dependencies(), this can be omitted and
+ * left as NULL.
*/
struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
struct drm_sched_entity *s_entity);
@@ -348,6 +459,14 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
void *owner);
+void drm_sched_job_arm(struct drm_sched_job *job);
+int drm_sched_job_add_dependency(struct drm_sched_job *job,
+ struct dma_fence *fence);
+int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
+ struct drm_gem_object *obj,
+ bool write);
+
+
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
struct drm_gpu_scheduler **sched_list,
unsigned int num_sched_list);
@@ -381,14 +500,17 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
-void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
- struct drm_sched_entity *entity);
+void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
-struct drm_sched_fence *drm_sched_fence_create(
+struct drm_sched_fence *drm_sched_fence_alloc(
struct drm_sched_entity *s_entity, void *owner);
+void drm_sched_fence_init(struct drm_sched_fence *fence,
+ struct drm_sched_entity *entity);
+void drm_sched_fence_free(struct drm_sched_fence *fence);
+
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);
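The split between drm_sched_job_init() and drm_sched_job_arm(), plus the new dependency xarray, gives the canonical submission sequence. A hedged sketch of a driver submit path (job, entity, owner and gem_obj are assumed to be set up by the caller):

	/* Editorial sketch: init, gather deps, arm, push. */
	ret = drm_sched_job_init(&job->base, entity, owner);
	if (ret)
		return ret;

	/* Fences collected here land in &drm_sched_job.dependencies. */
	ret = drm_sched_job_add_implicit_dependencies(&job->base, gem_obj, true);
	if (ret)
		return ret;	/* real code: drm_sched_job_cleanup() first */

	drm_sched_job_arm(&job->base);	/* point of no return */
	drm_sched_entity_push_job(&job->base);
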
diff --git a/include/drm/gud.h b/include/drm/gud.h
index 0b46b54fe56e..c52a8ba4ae4e 100644
--- a/include/drm/gud.h
+++ b/include/drm/gud.h
@@ -246,10 +246,12 @@ struct gud_state_req {
/* Get supported pixel formats as a byte array of GUD_PIXEL_FORMAT_* */
#define GUD_REQ_GET_FORMATS 0x40
#define GUD_FORMATS_MAX_NUM 32
- /* R1 is a 1-bit monochrome transfer format presented to userspace as XRGB8888 */
- #define GUD_PIXEL_FORMAT_R1 0x01
+ #define GUD_PIXEL_FORMAT_R1 0x01 /* 1-bit monochrome */
+ #define GUD_PIXEL_FORMAT_R8 0x08 /* 8-bit greyscale */
#define GUD_PIXEL_FORMAT_XRGB1111 0x20
+ #define GUD_PIXEL_FORMAT_RGB332 0x30
#define GUD_PIXEL_FORMAT_RGB565 0x40
+ #define GUD_PIXEL_FORMAT_RGB888 0x50
#define GUD_PIXEL_FORMAT_XRGB8888 0x80
#define GUD_PIXEL_FORMAT_ARGB8888 0x81
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index 55c3b123581b..c1e2a43d2d1e 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -29,6 +29,7 @@
enum i915_component_type {
I915_COMPONENT_AUDIO = 1,
I915_COMPONENT_HDCP,
+ I915_COMPONENT_PXP
};
/* MAX_PORT is the number of port
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index eee18fa53b54..c00ac54692d7 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -632,18 +632,16 @@
INTEL_VGA_DEVICE(0x4905, info), \
INTEL_VGA_DEVICE(0x4906, info), \
INTEL_VGA_DEVICE(0x4907, info), \
- INTEL_VGA_DEVICE(0x4908, info)
+ INTEL_VGA_DEVICE(0x4908, info), \
+ INTEL_VGA_DEVICE(0x4909, info)
/* ADL-S */
#define INTEL_ADLS_IDS(info) \
INTEL_VGA_DEVICE(0x4680, info), \
- INTEL_VGA_DEVICE(0x4681, info), \
INTEL_VGA_DEVICE(0x4682, info), \
- INTEL_VGA_DEVICE(0x4683, info), \
INTEL_VGA_DEVICE(0x4688, info), \
- INTEL_VGA_DEVICE(0x4689, info), \
+ INTEL_VGA_DEVICE(0x468A, info), \
INTEL_VGA_DEVICE(0x4690, info), \
- INTEL_VGA_DEVICE(0x4691, info), \
INTEL_VGA_DEVICE(0x4692, info), \
INTEL_VGA_DEVICE(0x4693, info)
diff --git a/include/drm/i915_pxp_tee_interface.h b/include/drm/i915_pxp_tee_interface.h
new file mode 100644
index 000000000000..af593ec64469
--- /dev/null
+++ b/include/drm/i915_pxp_tee_interface.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _I915_PXP_TEE_INTERFACE_H_
+#define _I915_PXP_TEE_INTERFACE_H_
+
+#include <linux/mutex.h>
+#include <linux/device.h>
+
+/**
+ * struct i915_pxp_component_ops - ops for PXP services.
+ * @owner: Module providing the ops
+ * @send: sends data to PXP
+ * @recv: receives data from PXP
+ */
+struct i915_pxp_component_ops {
+ /**
+ * @owner: owner of the module providing the ops
+ */
+ struct module *owner;
+
+ int (*send)(struct device *dev, const void *message, size_t size);
+ int (*recv)(struct device *dev, void *buffer, size_t size);
+};
+
+/**
+ * struct i915_pxp_component - Used for communication between i915 and TEE
+ * drivers for the PXP services
+ * @tee_dev: device that provide the PXP service from TEE Bus.
+ * @pxp_ops: Ops implemented by TEE driver, used by i915 driver.
+ */
+struct i915_pxp_component {
+ struct device *tee_dev;
+ const struct i915_pxp_component_ops *ops;
+
+ /* To protect the above members. */
+ struct mutex mutex;
+};
+
+#endif /* _I915_PXP_TEE_INTERFACE_H_ */
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index f681bbdbc698..cd785cfa3123 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -265,18 +265,6 @@ static inline int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_opera
}
/**
- * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
- *
- * @placement: Return immediately if buffer is busy.
- * @mem: The struct ttm_resource indicating the region where the bo resides
- * @new_flags: Describes compatible placement found
- *
- * Returns true if the placement is compatible
- */
-bool ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_resource *mem,
- uint32_t *new_flags);
-
-/**
* ttm_bo_validate
*
* @bo: The buffer object.
@@ -363,9 +351,10 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
* @bo: Pointer to a ttm_buffer_object to be initialized.
* @size: Requested size of buffer object.
* @type: Requested type of buffer object.
- * @flags: Initial placement flags.
+ * @placement: Initial placement for buffer object.
* @page_alignment: Data alignment in pages.
* @ctx: TTM operation context for memory allocation.
+ * @sg: Scatter-gather table.
* @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
* @destroy: Destroy function. Use NULL for kfree().
*
@@ -406,7 +395,7 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
* @bo: Pointer to a ttm_buffer_object to be initialized.
* @size: Requested size of buffer object.
* @type: Requested type of buffer object.
- * @flags: Initial placement flags.
+ * @placement: Initial placement for buffer object.
* @page_alignment: Data alignment in pages.
* @interruptible: If needing to sleep to wait for GPU resources,
* sleep interruptible.
@@ -414,6 +403,7 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
* holds a pointer to a persistent shmem object. Typically, this would
* point to the shmem object backing a GEM object if TTM is used to back a
* GEM user interface.
+ * @sg: Scatter-gather table.
* @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
* @destroy: Destroy function. Use NULL for kfree().
*
@@ -594,8 +584,7 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
pgprot_t prot,
- pgoff_t num_prefault,
- pgoff_t fault_page_size);
+ pgoff_t num_prefault);
vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf);
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 68d6069572aa..5f087575194b 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -322,7 +322,7 @@ int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem);
*/
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
-void ttm_move_memcpy(struct ttm_buffer_object *bo,
+void ttm_move_memcpy(bool clear,
u32 num_pages,
struct ttm_kmap_iter *dst_iter,
struct ttm_kmap_iter *src_iter);
diff --git a/include/drm/ttm/ttm_caching.h b/include/drm/ttm/ttm_caching.h
index 3c9dd65f5aaf..235a743d90e1 100644
--- a/include/drm/ttm/ttm_caching.h
+++ b/include/drm/ttm/ttm_caching.h
@@ -27,9 +27,26 @@
#define TTM_NUM_CACHING_TYPES 3
+/**
+ * enum ttm_caching - CPU caching and BUS snooping behavior.
+ */
enum ttm_caching {
+ /**
+ * @ttm_uncached: Most defensive option for device mappings,
+ * don't even allow write combining.
+ */
ttm_uncached,
+
+ /**
+ * @ttm_write_combined: Don't cache read accesses, but allow at least
+ * writes to be combined.
+ */
ttm_write_combined,
+
+ /**
+ * @ttm_cached: Fully cached like normal system memory, requires that
+ * devices snoop the CPU cache on accesses.
+ */
ttm_cached
};
diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h
index cd592f8e941b..0a4ddec78d8f 100644
--- a/include/drm/ttm/ttm_device.h
+++ b/include/drm/ttm/ttm_device.h
@@ -39,31 +39,23 @@ struct ttm_operation_ctx;
/**
* struct ttm_global - Buffer object driver global data.
- *
- * @dummy_read_page: Pointer to a dummy page used for mapping requests
- * of unpopulated pages.
- * @shrink: A shrink callback object used for buffer object swap.
- * @device_list_mutex: Mutex protecting the device list.
- * This mutex is held while traversing the device list for pm options.
- * @lru_lock: Spinlock protecting the bo subsystem lru lists.
- * @device_list: List of buffer object devices.
- * @swap_lru: Lru list of buffer objects used for swapping.
*/
extern struct ttm_global {
/**
- * Constant after init.
+ * @dummy_read_page: Pointer to a dummy page used for mapping requests
+ * of unpopulated pages. Constant after init.
*/
-
struct page *dummy_read_page;
/**
- * Protected by ttm_global_mutex.
+ * @device_list: List of buffer object devices. Protected by
+ * ttm_global_mutex.
*/
struct list_head device_list;
/**
- * Internal protection.
+ * @bo_count: Number of buffer objects allocated by devices.
*/
atomic_t bo_count;
} ttm_glob;
@@ -73,7 +65,7 @@ struct ttm_device_funcs {
* ttm_tt_create
*
* @bo: The buffer object to create the ttm for.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
*
* Create a struct ttm_tt to back data with system memory pages.
* No pages are actually allocated.
@@ -230,49 +222,64 @@ struct ttm_device_funcs {
/**
* struct ttm_device - Buffer object driver device-specific data.
- *
- * @device_list: Our entry in the global device list.
- * @funcs: Function table for the device.
- * @sysman: Resource manager for the system domain.
- * @man_drv: An array of resource_managers.
- * @vma_manager: Address space manager.
- * @pool: page pool for the device.
- * @dev_mapping: A pointer to the struct address_space representing the
- * device address space.
- * @wq: Work queue structure for the delayed delete workqueue.
*/
struct ttm_device {
- /*
+ /**
+ * @device_list: Our entry in the global device list.
* Constant after bo device init
*/
struct list_head device_list;
+
+ /**
+ * @funcs: Function table for the device.
+ * Constant after bo device init
+ */
struct ttm_device_funcs *funcs;
- /*
+ /**
+ * @sysman: Resource manager for the system domain.
* Access via ttm_manager_type.
*/
struct ttm_resource_manager sysman;
+
+ /**
+ * @man_drv: An array of resource_managers, one per resource type.
+ */
struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES];
- /*
- * Protected by internal locks.
+ /**
+ * @vma_manager: Address space manager for finding BOs to mmap.
*/
struct drm_vma_offset_manager *vma_manager;
+
+ /**
+ * @pool: page pool for the device.
+ */
struct ttm_pool pool;
- /*
- * Protection for the per manager LRU and ddestroy lists.
+ /**
+ * @lru_lock: Protection for the per manager LRU and ddestroy lists.
*/
spinlock_t lru_lock;
+
+ /**
+ * @ddestroy: Destroyed but not yet cleaned up buffer objects.
+ */
struct list_head ddestroy;
- /*
- * Protected by load / firstopen / lastclose /unload sync.
+ /**
+ * @pinned: Buffer objects which are pinned and so not on any LRU list.
+ */
+ struct list_head pinned;
+
+ /**
+ * @dev_mapping: A pointer to the struct address_space for invalidating
+ * CPU mappings on buffer move. Protected by load/unload sync.
*/
struct address_space *dev_mapping;
- /*
- * Internal protection.
+ /**
+ * @wq: Work queue structure for the delayed delete workqueue.
*/
struct delayed_work wq;
};
@@ -284,12 +291,15 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
static inline struct ttm_resource_manager *
ttm_manager_type(struct ttm_device *bdev, int mem_type)
{
+ BUILD_BUG_ON(__builtin_constant_p(mem_type)
+ && mem_type >= TTM_NUM_MEM_TYPES);
return bdev->man_drv[mem_type];
}
static inline void ttm_set_driver_manager(struct ttm_device *bdev, int type,
struct ttm_resource_manager *manager)
{
+ BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
bdev->man_drv[type] = manager;
}
@@ -298,5 +308,6 @@ int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
struct drm_vma_offset_manager *vma_manager,
bool use_dma_alloc, bool use_dma32);
void ttm_device_fini(struct ttm_device *bdev);
+void ttm_device_clear_dma_mappings(struct ttm_device *bdev);
#endif
diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h
index 8995c9e4ec1b..76d1b9119a2b 100644
--- a/include/drm/ttm/ttm_placement.h
+++ b/include/drm/ttm/ttm_placement.h
@@ -58,6 +58,7 @@
*
* @fpfn: first valid page frame number to put the object
* @lpfn: last valid page frame number to put the object
+ * @mem_type: One of TTM_PL_* where the resource should be allocated from.
* @flags: memory domain and caching flags for the object
*
* Structure indicating a possible place to put an object.
diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h
index 4321728bdd11..ef09b23d29e3 100644
--- a/include/drm/ttm/ttm_pool.h
+++ b/include/drm/ttm/ttm_pool.h
@@ -37,7 +37,7 @@ struct ttm_pool;
struct ttm_operation_ctx;
/**
- * ttm_pool_type - Pool for a certain memory type
+ * struct ttm_pool_type - Pool for a certain memory type
*
* @pool: the pool we belong to, might be NULL for the global ones
* @order: the allocation order our pages have
@@ -58,8 +58,9 @@ struct ttm_pool_type {
};
/**
- * ttm_pool - Pool for all caching and orders
+ * struct ttm_pool - Pool for all caching and orders
*
+ * @dev: the device we allocate pages for
* @use_dma_alloc: if coherent DMA allocations should be used
* @use_dma32: if GFP_DMA32 should be used
* @caching: pools for each caching/order
diff --git a/include/drm/ttm/ttm_range_manager.h b/include/drm/ttm/ttm_range_manager.h
index 22b6fa42ac20..7963b957e9ef 100644
--- a/include/drm/ttm/ttm_range_manager.h
+++ b/include/drm/ttm/ttm_range_manager.h
@@ -4,6 +4,7 @@
#define _TTM_RANGE_MANAGER_H_
#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_device.h>
#include <drm/drm_mm.h>
/**
@@ -33,10 +34,23 @@ to_ttm_range_mgr_node(struct ttm_resource *res)
return container_of(res, struct ttm_range_mgr_node, base);
}
-int ttm_range_man_init(struct ttm_device *bdev,
+int ttm_range_man_init_nocheck(struct ttm_device *bdev,
unsigned type, bool use_tt,
unsigned long p_size);
-int ttm_range_man_fini(struct ttm_device *bdev,
+int ttm_range_man_fini_nocheck(struct ttm_device *bdev,
unsigned type);
+static __always_inline int ttm_range_man_init(struct ttm_device *bdev,
+ unsigned int type, bool use_tt,
+ unsigned long p_size)
+{
+ BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
+ return ttm_range_man_init_nocheck(bdev, type, use_tt, p_size);
+}
+static __always_inline int ttm_range_man_fini(struct ttm_device *bdev,
+ unsigned int type)
+{
+ BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
+ return ttm_range_man_fini_nocheck(bdev, type);
+}
#endif
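The __always_inline wrappers above exist so that a constant, out-of-range memory type fails at build time rather than at runtime; call sites are otherwise unchanged. A hedged sketch (vram_pages and the bdev member are assumptions about the driver):

	/* Editorial sketch: TTM_PL_VRAM is validated against
	 * TTM_NUM_MEM_TYPES by the wrapper's BUILD_BUG_ON().
	 */
	ret = ttm_range_man_init(&drm->bdev, TTM_PL_VRAM,
				 false /* !use_tt */, vram_pages);
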
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 140b6b9a8bbe..5952051091cd 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -40,6 +40,7 @@ struct ttm_resource_manager;
struct ttm_resource;
struct ttm_place;
struct ttm_buffer_object;
+struct ttm_placement;
struct dma_buf_map;
struct io_mapping;
struct sg_table;
@@ -102,10 +103,7 @@ struct ttm_resource_manager_func {
* struct ttm_resource_manager
*
* @use_type: The memory type is enabled.
- * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
- * managed by this memory type.
- * @gpu_offset: If used, the GPU offset of the first managed page of
- * fixed memory or the first managed location in an aperture.
+ * @use_tt: If a TT object should be used for the backing store.
* @size: Size of the managed region.
* @func: structure pointer implementing the range manager. See above
* @move_lock: lock for move fence
@@ -143,6 +141,7 @@ struct ttm_resource_manager {
* @addr: mapped virtual address
* @offset: physical addr
* @is_iomem: is this io memory ?
+ * @caching: See enum ttm_caching
*
* Structure indicating the bus placement of an object.
*/
@@ -266,6 +265,8 @@ int ttm_resource_alloc(struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource **res);
void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res);
+bool ttm_resource_compat(struct ttm_resource *res,
+ struct ttm_placement *placement);
void ttm_resource_manager_init(struct ttm_resource_manager *man,
unsigned long p_size);
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index b20e89d321b0..f20832139815 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -38,36 +38,70 @@ struct ttm_resource;
struct ttm_buffer_object;
struct ttm_operation_ctx;
-#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
-#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
-#define TTM_PAGE_FLAG_SG (1 << 8)
-#define TTM_PAGE_FLAG_NO_RETRY (1 << 9)
-
-#define TTM_PAGE_FLAG_PRIV_POPULATED (1 << 31)
-
/**
- * struct ttm_tt
- *
- * @pages: Array of pages backing the data.
- * @page_flags: see TTM_PAGE_FLAG_*
- * @num_pages: Number of pages in the page array.
- * @sg: for SG objects via dma-buf
- * @dma_address: The DMA (bus) addresses of the pages
- * @swap_storage: Pointer to shmem struct file for swap storage.
- * @pages_list: used by some page allocation backend
- * @caching: The current caching state of the pages.
- *
- * This is a structure holding the pages, caching- and aperture binding
- * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * struct ttm_tt - This is a structure holding the pages, caching- and aperture
+ * binding status for a buffer object that isn't backed by fixed (VRAM / AGP)
* memory.
*/
struct ttm_tt {
+ /** @pages: Array of pages backing the data. */
struct page **pages;
+ /**
+ * @page_flags: The page flags.
+ *
+ * Supported values:
+ *
+ * TTM_TT_FLAG_SWAPPED: Set by TTM when the pages have been unpopulated
+ * and swapped out by TTM. Calling ttm_tt_populate() will then swap the
+ * pages back in, and unset the flag. Drivers should in general never
+ * need to touch this.
+ *
+ * TTM_TT_FLAG_ZERO_ALLOC: Set if the pages will be zeroed on
+ * allocation.
+ *
+ * TTM_TT_FLAG_EXTERNAL: Set if the underlying pages were allocated
+ * externally, like with dma-buf or userptr. This effectively disables
+ * TTM swapping out such pages. Also important is to prevent TTM from
+ * ever directly mapping these pages.
+ *
+ * Note that enum ttm_bo_type.ttm_bo_type_sg objects will always enable
+ * this flag.
+ *
+ * TTM_TT_FLAG_EXTERNAL_MAPPABLE: Same behaviour as
+ * TTM_TT_FLAG_EXTERNAL, but with the reduced restriction that it is
+ * still valid to use TTM to map the pages directly. This is useful when
+ * implementing a ttm_tt backend which still allocates driver-owned
+ * pages underneath (say with shmem).
+ *
+ * Note that since this also implies TTM_TT_FLAG_EXTERNAL, the usage
+ * here should always be:
+ *
+ * page_flags = TTM_TT_FLAG_EXTERNAL |
+ * TTM_TT_FLAG_EXTERNAL_MAPPABLE;
+ *
+ * TTM_TT_FLAG_PRIV_POPULATED: TTM internal only. DO NOT USE. This is
+ * set by TTM after ttm_tt_populate() has successfully returned, and is
+ * then unset when TTM calls ttm_tt_unpopulate().
+ */
+#define TTM_TT_FLAG_SWAPPED (1 << 0)
+#define TTM_TT_FLAG_ZERO_ALLOC (1 << 1)
+#define TTM_TT_FLAG_EXTERNAL (1 << 2)
+#define TTM_TT_FLAG_EXTERNAL_MAPPABLE (1 << 3)
+
+#define TTM_TT_FLAG_PRIV_POPULATED (1 << 31)
uint32_t page_flags;
+ /** @num_pages: Number of pages in the page array. */
uint32_t num_pages;
+ /** @sg: for SG objects via dma-buf. */
struct sg_table *sg;
+ /** @dma_address: The DMA (bus) addresses of the pages. */
dma_addr_t *dma_address;
+ /** @swap_storage: Pointer to shmem struct file for swap storage. */
struct file *swap_storage;
+ /**
+ * @caching: The current caching state of the pages, see enum
+ * ttm_caching.
+ */
enum ttm_caching caching;
};
@@ -85,7 +119,7 @@ struct ttm_kmap_iter_tt {
static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
{
- return tt->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED;
+ return tt->page_flags & TTM_TT_FLAG_PRIV_POPULATED;
}
/**
@@ -104,7 +138,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
*
* @ttm: The struct ttm_tt.
* @bo: The buffer object we create the ttm for.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
* @caching: the desired caching state of the pages
*
* Create a struct ttm_tt to back data with system memory pages.
@@ -127,8 +161,9 @@ int ttm_sg_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo,
void ttm_tt_fini(struct ttm_tt *ttm);
/**
- * ttm_ttm_destroy:
+ * ttm_tt_destroy:
*
+ * @bdev: the ttm_device this object belongs to
* @ttm: The struct ttm_tt.
*
* Unbind, unpopulate and destroy common struct ttm_tt.
@@ -136,13 +171,6 @@ void ttm_tt_fini(struct ttm_tt *ttm);
void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm);
/**
- * ttm_tt_destroy_common:
- *
- * Called from driver to destroy common path.
- */
-void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm);
-
-/**
* ttm_tt_swapin:
*
* @ttm: The struct ttm_tt.
@@ -156,15 +184,19 @@ int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
/**
* ttm_tt_populate - allocate pages for a ttm
*
+ * @bdev: the ttm_device this object belongs to
* @ttm: Pointer to the ttm_tt structure
+ * @ctx: operation context for populating the tt object.
*
* Calls the driver method to allocate pages for a ttm
*/
-int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
+int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx);
/**
* ttm_tt_unpopulate - free pages from a ttm
*
+ * @bdev: the ttm_device this object belongs to
* @ttm: Pointer to the ttm_tt structure
*
* Calls the driver method to free all pages from a ttm
@@ -181,7 +213,7 @@ void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm);
*/
static inline void ttm_tt_mark_for_clear(struct ttm_tt *ttm)
{
- ttm->page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
+ ttm->page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
}
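
Again purely as a hedged sketch (not in the patch): combining the helpers
above, a driver could request zeroed pages on the next population. The tt and
bdev variables are assumed to be in scope, and the ctx setup is illustrative.

	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	int ret;

	/* Zero the pages when they are (re)allocated by populate. */
	ttm_tt_mark_for_clear(tt);

	if (!ttm_tt_is_populated(tt)) {
		ret = ttm_tt_populate(bdev, tt, &ctx);
		if (ret)
			return ret;
	}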
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages);
@@ -197,7 +229,7 @@ struct ttm_kmap_iter *ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
*
* @bo: Buffer object we allocate the ttm for.
* @bridge: The agp bridge this device is sitting on.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
 *
* Create a TTM backend that uses the indicated AGP bridge as an aperture
diff --git a/include/dt-bindings/clock/am4.h b/include/dt-bindings/clock/am4.h
index d961e7cb3682..4be6c5961f34 100644
--- a/include/dt-bindings/clock/am4.h
+++ b/include/dt-bindings/clock/am4.h
@@ -158,6 +158,7 @@
#define AM4_L3S_VPFE0_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x68)
#define AM4_L3S_VPFE1_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x70)
#define AM4_L3S_GPMC_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x220)
+#define AM4_L3S_ADC1_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x230)
#define AM4_L3S_MCASP0_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x238)
#define AM4_L3S_MCASP1_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x240)
#define AM4_L3S_MMC3_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x248)
diff --git a/include/dt-bindings/clock/exynos850.h b/include/dt-bindings/clock/exynos850.h
new file mode 100644
index 000000000000..8999184f94a2
--- /dev/null
+++ b/include/dt-bindings/clock/exynos850.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2021 Linaro Ltd.
+ * Author: Sam Protsenko <semen.protsenko@linaro.org>
+ *
+ * Device Tree binding constants for Exynos850 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOS_850_H
+#define _DT_BINDINGS_CLOCK_EXYNOS_850_H
+
+/* CMU_TOP */
+#define CLK_FOUT_SHARED0_PLL 1
+#define CLK_FOUT_SHARED1_PLL 2
+#define CLK_FOUT_MMC_PLL 3
+#define CLK_MOUT_SHARED0_PLL 4
+#define CLK_MOUT_SHARED1_PLL 5
+#define CLK_MOUT_MMC_PLL 6
+#define CLK_MOUT_CORE_BUS 7
+#define CLK_MOUT_CORE_CCI 8
+#define CLK_MOUT_CORE_MMC_EMBD 9
+#define CLK_MOUT_CORE_SSS 10
+#define CLK_MOUT_DPU 11
+#define CLK_MOUT_HSI_BUS 12
+#define CLK_MOUT_HSI_MMC_CARD 13
+#define CLK_MOUT_HSI_USB20DRD 14
+#define CLK_MOUT_PERI_BUS 15
+#define CLK_MOUT_PERI_UART 16
+#define CLK_MOUT_PERI_IP 17
+#define CLK_DOUT_SHARED0_DIV3 18
+#define CLK_DOUT_SHARED0_DIV2 19
+#define CLK_DOUT_SHARED1_DIV3 20
+#define CLK_DOUT_SHARED1_DIV2 21
+#define CLK_DOUT_SHARED0_DIV4 22
+#define CLK_DOUT_SHARED1_DIV4 23
+#define CLK_DOUT_CORE_BUS 24
+#define CLK_DOUT_CORE_CCI 25
+#define CLK_DOUT_CORE_MMC_EMBD 26
+#define CLK_DOUT_CORE_SSS 27
+#define CLK_DOUT_DPU 28
+#define CLK_DOUT_HSI_BUS 29
+#define CLK_DOUT_HSI_MMC_CARD 30
+#define CLK_DOUT_HSI_USB20DRD 31
+#define CLK_DOUT_PERI_BUS 32
+#define CLK_DOUT_PERI_UART 33
+#define CLK_DOUT_PERI_IP 34
+#define CLK_GOUT_CORE_BUS 35
+#define CLK_GOUT_CORE_CCI 36
+#define CLK_GOUT_CORE_MMC_EMBD 37
+#define CLK_GOUT_CORE_SSS 38
+#define CLK_GOUT_DPU 39
+#define CLK_GOUT_HSI_BUS 40
+#define CLK_GOUT_HSI_MMC_CARD 41
+#define CLK_GOUT_HSI_USB20DRD 42
+#define CLK_GOUT_PERI_BUS 43
+#define CLK_GOUT_PERI_UART 44
+#define CLK_GOUT_PERI_IP 45
+#define TOP_NR_CLK 46
+
+/* CMU_HSI */
+#define CLK_MOUT_HSI_BUS_USER 1
+#define CLK_MOUT_HSI_MMC_CARD_USER 2
+#define CLK_MOUT_HSI_USB20DRD_USER 3
+#define CLK_MOUT_HSI_RTC 4
+#define CLK_GOUT_USB_RTC_CLK 5
+#define CLK_GOUT_USB_REF_CLK 6
+#define CLK_GOUT_USB_PHY_REF_CLK 7
+#define CLK_GOUT_USB_PHY_ACLK 8
+#define CLK_GOUT_USB_BUS_EARLY_CLK 9
+#define CLK_GOUT_GPIO_HSI_PCLK 10
+#define CLK_GOUT_MMC_CARD_ACLK 11
+#define CLK_GOUT_MMC_CARD_SDCLKIN 12
+#define CLK_GOUT_SYSREG_HSI_PCLK 13
+#define HSI_NR_CLK 14
+
+/* CMU_PERI */
+#define CLK_MOUT_PERI_BUS_USER 1
+#define CLK_MOUT_PERI_UART_USER 2
+#define CLK_MOUT_PERI_HSI2C_USER 3
+#define CLK_MOUT_PERI_SPI_USER 4
+#define CLK_DOUT_PERI_HSI2C0 5
+#define CLK_DOUT_PERI_HSI2C1 6
+#define CLK_DOUT_PERI_HSI2C2 7
+#define CLK_DOUT_PERI_SPI0 8
+#define CLK_GOUT_PERI_HSI2C0 9
+#define CLK_GOUT_PERI_HSI2C1 10
+#define CLK_GOUT_PERI_HSI2C2 11
+#define CLK_GOUT_GPIO_PERI_PCLK 12
+#define CLK_GOUT_HSI2C0_IPCLK 13
+#define CLK_GOUT_HSI2C0_PCLK 14
+#define CLK_GOUT_HSI2C1_IPCLK 15
+#define CLK_GOUT_HSI2C1_PCLK 16
+#define CLK_GOUT_HSI2C2_IPCLK 17
+#define CLK_GOUT_HSI2C2_PCLK 18
+#define CLK_GOUT_I2C0_PCLK 19
+#define CLK_GOUT_I2C1_PCLK 20
+#define CLK_GOUT_I2C2_PCLK 21
+#define CLK_GOUT_I2C3_PCLK 22
+#define CLK_GOUT_I2C4_PCLK 23
+#define CLK_GOUT_I2C5_PCLK 24
+#define CLK_GOUT_I2C6_PCLK 25
+#define CLK_GOUT_MCT_PCLK 26
+#define CLK_GOUT_PWM_MOTOR_PCLK 27
+#define CLK_GOUT_SPI0_IPCLK 28
+#define CLK_GOUT_SPI0_PCLK 29
+#define CLK_GOUT_SYSREG_PERI_PCLK 30
+#define CLK_GOUT_UART_IPCLK 31
+#define CLK_GOUT_UART_PCLK 32
+#define CLK_GOUT_WDT0_PCLK 33
+#define CLK_GOUT_WDT1_PCLK 34
+#define PERI_NR_CLK 35
+
+/* CMU_CORE */
+#define CLK_MOUT_CORE_BUS_USER 1
+#define CLK_MOUT_CORE_CCI_USER 2
+#define CLK_MOUT_CORE_MMC_EMBD_USER 3
+#define CLK_MOUT_CORE_SSS_USER 4
+#define CLK_MOUT_CORE_GIC 5
+#define CLK_DOUT_CORE_BUSP 6
+#define CLK_GOUT_CCI_ACLK 7
+#define CLK_GOUT_GIC_CLK 8
+#define CLK_GOUT_MMC_EMBD_ACLK 9
+#define CLK_GOUT_MMC_EMBD_SDCLKIN 10
+#define CLK_GOUT_SSS_ACLK 11
+#define CLK_GOUT_SSS_PCLK 12
+#define CORE_NR_CLK 13
+
+/* CMU_DPU */
+#define CLK_MOUT_DPU_USER 1
+#define CLK_DOUT_DPU_BUSP 2
+#define CLK_GOUT_DPU_CMU_DPU_PCLK 3
+#define CLK_GOUT_DPU_DECON0_ACLK 4
+#define CLK_GOUT_DPU_DMA_ACLK 5
+#define CLK_GOUT_DPU_DPP_ACLK 6
+#define CLK_GOUT_DPU_PPMU_ACLK 7
+#define CLK_GOUT_DPU_PPMU_PCLK 8
+#define CLK_GOUT_DPU_SMMU_CLK 9
+#define CLK_GOUT_DPU_SYSREG_PCLK 10
+#define DPU_NR_CLK 11
+
+#endif /* _DT_BINDINGS_CLOCK_EXYNOS_850_H */
diff --git a/include/dt-bindings/clock/imx8ulp-clock.h b/include/dt-bindings/clock/imx8ulp-clock.h
new file mode 100644
index 000000000000..953ecfe8ebcc
--- /dev/null
+++ b/include/dt-bindings/clock/imx8ulp-clock.h
@@ -0,0 +1,258 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR MIT */
+/*
+ * Copyright 2021 NXP
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX8ULP_H
+#define __DT_BINDINGS_CLOCK_IMX8ULP_H
+
+#define IMX8ULP_CLK_DUMMY 0
+
+/* CGC1 */
+#define IMX8ULP_CLK_SPLL2 5
+#define IMX8ULP_CLK_SPLL3 6
+#define IMX8ULP_CLK_A35_SEL 7
+#define IMX8ULP_CLK_A35_DIV 8
+#define IMX8ULP_CLK_SPLL2_PRE_SEL 9
+#define IMX8ULP_CLK_SPLL3_PRE_SEL 10
+#define IMX8ULP_CLK_SPLL3_PFD0 11
+#define IMX8ULP_CLK_SPLL3_PFD1 12
+#define IMX8ULP_CLK_SPLL3_PFD2 13
+#define IMX8ULP_CLK_SPLL3_PFD3 14
+#define IMX8ULP_CLK_SPLL3_PFD0_DIV1 15
+#define IMX8ULP_CLK_SPLL3_PFD0_DIV2 16
+#define IMX8ULP_CLK_SPLL3_PFD1_DIV1 17
+#define IMX8ULP_CLK_SPLL3_PFD1_DIV2 18
+#define IMX8ULP_CLK_SPLL3_PFD2_DIV1 19
+#define IMX8ULP_CLK_SPLL3_PFD2_DIV2 20
+#define IMX8ULP_CLK_SPLL3_PFD3_DIV1 21
+#define IMX8ULP_CLK_SPLL3_PFD3_DIV2 22
+#define IMX8ULP_CLK_NIC_SEL 23
+#define IMX8ULP_CLK_NIC_AD_DIVPLAT 24
+#define IMX8ULP_CLK_NIC_PER_DIVPLAT 25
+#define IMX8ULP_CLK_XBAR_SEL 26
+#define IMX8ULP_CLK_XBAR_AD_DIVPLAT 27
+#define IMX8ULP_CLK_XBAR_DIVBUS 28
+#define IMX8ULP_CLK_XBAR_AD_SLOW 29
+#define IMX8ULP_CLK_SOSC_DIV1 30
+#define IMX8ULP_CLK_SOSC_DIV2 31
+#define IMX8ULP_CLK_SOSC_DIV3 32
+#define IMX8ULP_CLK_FROSC_DIV1 33
+#define IMX8ULP_CLK_FROSC_DIV2 34
+#define IMX8ULP_CLK_FROSC_DIV3 35
+#define IMX8ULP_CLK_SPLL3_VCODIV 36
+#define IMX8ULP_CLK_SPLL3_PFD0_DIV1_GATE 37
+#define IMX8ULP_CLK_SPLL3_PFD0_DIV2_GATE 38
+#define IMX8ULP_CLK_SPLL3_PFD1_DIV1_GATE 39
+#define IMX8ULP_CLK_SPLL3_PFD1_DIV2_GATE 40
+#define IMX8ULP_CLK_SPLL3_PFD2_DIV1_GATE 41
+#define IMX8ULP_CLK_SPLL3_PFD2_DIV2_GATE 42
+#define IMX8ULP_CLK_SPLL3_PFD3_DIV1_GATE 43
+#define IMX8ULP_CLK_SPLL3_PFD3_DIV2_GATE 44
+#define IMX8ULP_CLK_SOSC_DIV1_GATE 45
+#define IMX8ULP_CLK_SOSC_DIV2_GATE 46
+#define IMX8ULP_CLK_SOSC_DIV3_GATE 47
+#define IMX8ULP_CLK_FROSC_DIV1_GATE 48
+#define IMX8ULP_CLK_FROSC_DIV2_GATE 49
+#define IMX8ULP_CLK_FROSC_DIV3_GATE 50
+#define IMX8ULP_CLK_SAI4_SEL 51
+#define IMX8ULP_CLK_SAI5_SEL 52
+#define IMX8ULP_CLK_AUD_CLK1 53
+#define IMX8ULP_CLK_ARM 54
+#define IMX8ULP_CLK_ENET_TS_SEL 55
+
+#define IMX8ULP_CLK_CGC1_END 56
+
+/* CGC2 */
+#define IMX8ULP_CLK_PLL4_PRE_SEL 0
+#define IMX8ULP_CLK_PLL4 1
+#define IMX8ULP_CLK_PLL4_VCODIV 2
+#define IMX8ULP_CLK_DDR_SEL 3
+#define IMX8ULP_CLK_DDR_DIV 4
+#define IMX8ULP_CLK_LPAV_AXI_SEL 5
+#define IMX8ULP_CLK_LPAV_AXI_DIV 6
+#define IMX8ULP_CLK_LPAV_AHB_DIV 7
+#define IMX8ULP_CLK_LPAV_BUS_DIV 8
+#define IMX8ULP_CLK_PLL4_PFD0 9
+#define IMX8ULP_CLK_PLL4_PFD1 10
+#define IMX8ULP_CLK_PLL4_PFD2 11
+#define IMX8ULP_CLK_PLL4_PFD3 12
+#define IMX8ULP_CLK_PLL4_PFD0_DIV1_GATE 13
+#define IMX8ULP_CLK_PLL4_PFD0_DIV2_GATE 14
+#define IMX8ULP_CLK_PLL4_PFD1_DIV1_GATE 15
+#define IMX8ULP_CLK_PLL4_PFD1_DIV2_GATE 16
+#define IMX8ULP_CLK_PLL4_PFD2_DIV1_GATE 17
+#define IMX8ULP_CLK_PLL4_PFD2_DIV2_GATE 18
+#define IMX8ULP_CLK_PLL4_PFD3_DIV1_GATE 19
+#define IMX8ULP_CLK_PLL4_PFD3_DIV2_GATE 20
+#define IMX8ULP_CLK_PLL4_PFD0_DIV1 21
+#define IMX8ULP_CLK_PLL4_PFD0_DIV2 22
+#define IMX8ULP_CLK_PLL4_PFD1_DIV1 23
+#define IMX8ULP_CLK_PLL4_PFD1_DIV2 24
+#define IMX8ULP_CLK_PLL4_PFD2_DIV1 25
+#define IMX8ULP_CLK_PLL4_PFD2_DIV2 26
+#define IMX8ULP_CLK_PLL4_PFD3_DIV1 27
+#define IMX8ULP_CLK_PLL4_PFD3_DIV2 28
+#define IMX8ULP_CLK_CGC2_SOSC_DIV1_GATE 29
+#define IMX8ULP_CLK_CGC2_SOSC_DIV2_GATE 30
+#define IMX8ULP_CLK_CGC2_SOSC_DIV3_GATE 31
+#define IMX8ULP_CLK_CGC2_SOSC_DIV1 32
+#define IMX8ULP_CLK_CGC2_SOSC_DIV2 33
+#define IMX8ULP_CLK_CGC2_SOSC_DIV3 34
+#define IMX8ULP_CLK_CGC2_FROSC_DIV1_GATE 35
+#define IMX8ULP_CLK_CGC2_FROSC_DIV2_GATE 36
+#define IMX8ULP_CLK_CGC2_FROSC_DIV3_GATE 37
+#define IMX8ULP_CLK_CGC2_FROSC_DIV1 38
+#define IMX8ULP_CLK_CGC2_FROSC_DIV2 39
+#define IMX8ULP_CLK_CGC2_FROSC_DIV3 40
+#define IMX8ULP_CLK_AUD_CLK2 41
+#define IMX8ULP_CLK_SAI6_SEL 42
+#define IMX8ULP_CLK_SAI7_SEL 43
+#define IMX8ULP_CLK_SPDIF_SEL 44
+#define IMX8ULP_CLK_HIFI_SEL 45
+#define IMX8ULP_CLK_HIFI_DIVCORE 46
+#define IMX8ULP_CLK_HIFI_DIVPLAT 47
+#define IMX8ULP_CLK_DSI_PHY_REF 48
+
+#define IMX8ULP_CLK_CGC2_END 49
+
+/* PCC3 */
+#define IMX8ULP_CLK_WDOG3 0
+#define IMX8ULP_CLK_WDOG4 1
+#define IMX8ULP_CLK_LPIT1 2
+#define IMX8ULP_CLK_TPM4 3
+#define IMX8ULP_CLK_TPM5 4
+#define IMX8ULP_CLK_FLEXIO1 5
+#define IMX8ULP_CLK_I3C2 6
+#define IMX8ULP_CLK_LPI2C4 7
+#define IMX8ULP_CLK_LPI2C5 8
+#define IMX8ULP_CLK_LPUART4 9
+#define IMX8ULP_CLK_LPUART5 10
+#define IMX8ULP_CLK_LPSPI4 11
+#define IMX8ULP_CLK_LPSPI5 12
+#define IMX8ULP_CLK_DMA1_MP 13
+#define IMX8ULP_CLK_DMA1_CH0 14
+#define IMX8ULP_CLK_DMA1_CH1 15
+#define IMX8ULP_CLK_DMA1_CH2 16
+#define IMX8ULP_CLK_DMA1_CH3 17
+#define IMX8ULP_CLK_DMA1_CH4 18
+#define IMX8ULP_CLK_DMA1_CH5 19
+#define IMX8ULP_CLK_DMA1_CH6 20
+#define IMX8ULP_CLK_DMA1_CH7 21
+#define IMX8ULP_CLK_DMA1_CH8 22
+#define IMX8ULP_CLK_DMA1_CH9 23
+#define IMX8ULP_CLK_DMA1_CH10 24
+#define IMX8ULP_CLK_DMA1_CH11 25
+#define IMX8ULP_CLK_DMA1_CH12 26
+#define IMX8ULP_CLK_DMA1_CH13 27
+#define IMX8ULP_CLK_DMA1_CH14 28
+#define IMX8ULP_CLK_DMA1_CH15 29
+#define IMX8ULP_CLK_DMA1_CH16 30
+#define IMX8ULP_CLK_DMA1_CH17 31
+#define IMX8ULP_CLK_DMA1_CH18 32
+#define IMX8ULP_CLK_DMA1_CH19 33
+#define IMX8ULP_CLK_DMA1_CH20 34
+#define IMX8ULP_CLK_DMA1_CH21 35
+#define IMX8ULP_CLK_DMA1_CH22 36
+#define IMX8ULP_CLK_DMA1_CH23 37
+#define IMX8ULP_CLK_DMA1_CH24 38
+#define IMX8ULP_CLK_DMA1_CH25 39
+#define IMX8ULP_CLK_DMA1_CH26 40
+#define IMX8ULP_CLK_DMA1_CH27 41
+#define IMX8ULP_CLK_DMA1_CH28 42
+#define IMX8ULP_CLK_DMA1_CH29 43
+#define IMX8ULP_CLK_DMA1_CH30 44
+#define IMX8ULP_CLK_DMA1_CH31 45
+#define IMX8ULP_CLK_MU3_A 46
+#define IMX8ULP_CLK_MU0_B 47
+
+#define IMX8ULP_CLK_PCC3_END 48
+
+/* PCC4 */
+#define IMX8ULP_CLK_FLEXSPI2 0
+#define IMX8ULP_CLK_TPM6 1
+#define IMX8ULP_CLK_TPM7 2
+#define IMX8ULP_CLK_LPI2C6 3
+#define IMX8ULP_CLK_LPI2C7 4
+#define IMX8ULP_CLK_LPUART6 5
+#define IMX8ULP_CLK_LPUART7 6
+#define IMX8ULP_CLK_SAI4 7
+#define IMX8ULP_CLK_SAI5 8
+#define IMX8ULP_CLK_PCTLE 9
+#define IMX8ULP_CLK_PCTLF 10
+#define IMX8ULP_CLK_USDHC0 11
+#define IMX8ULP_CLK_USDHC1 12
+#define IMX8ULP_CLK_USDHC2 13
+#define IMX8ULP_CLK_USB0 14
+#define IMX8ULP_CLK_USB0_PHY 15
+#define IMX8ULP_CLK_USB1 16
+#define IMX8ULP_CLK_USB1_PHY 17
+#define IMX8ULP_CLK_USB_XBAR 18
+#define IMX8ULP_CLK_ENET 19
+#define IMX8ULP_CLK_SFA1 20
+#define IMX8ULP_CLK_RGPIOE 21
+#define IMX8ULP_CLK_RGPIOF 22
+
+#define IMX8ULP_CLK_PCC4_END 23
+
+/* PCC5 */
+#define IMX8ULP_CLK_TPM8 0
+#define IMX8ULP_CLK_SAI6 1
+#define IMX8ULP_CLK_SAI7 2
+#define IMX8ULP_CLK_SPDIF 3
+#define IMX8ULP_CLK_ISI 4
+#define IMX8ULP_CLK_CSI_REGS 5
+#define IMX8ULP_CLK_PCTLD 6
+#define IMX8ULP_CLK_CSI 7
+#define IMX8ULP_CLK_DSI 8
+#define IMX8ULP_CLK_WDOG5 9
+#define IMX8ULP_CLK_EPDC 10
+#define IMX8ULP_CLK_PXP 11
+#define IMX8ULP_CLK_SFA2 12
+#define IMX8ULP_CLK_GPU2D 13
+#define IMX8ULP_CLK_GPU3D 14
+#define IMX8ULP_CLK_DC_NANO 15
+#define IMX8ULP_CLK_CSI_CLK_UI 16
+#define IMX8ULP_CLK_CSI_CLK_ESC 17
+#define IMX8ULP_CLK_RGPIOD 18
+#define IMX8ULP_CLK_DMA2_MP 19
+#define IMX8ULP_CLK_DMA2_CH0 20
+#define IMX8ULP_CLK_DMA2_CH1 21
+#define IMX8ULP_CLK_DMA2_CH2 22
+#define IMX8ULP_CLK_DMA2_CH3 23
+#define IMX8ULP_CLK_DMA2_CH4 24
+#define IMX8ULP_CLK_DMA2_CH5 25
+#define IMX8ULP_CLK_DMA2_CH6 26
+#define IMX8ULP_CLK_DMA2_CH7 27
+#define IMX8ULP_CLK_DMA2_CH8 28
+#define IMX8ULP_CLK_DMA2_CH9 29
+#define IMX8ULP_CLK_DMA2_CH10 30
+#define IMX8ULP_CLK_DMA2_CH11 31
+#define IMX8ULP_CLK_DMA2_CH12 32
+#define IMX8ULP_CLK_DMA2_CH13 33
+#define IMX8ULP_CLK_DMA2_CH14 34
+#define IMX8ULP_CLK_DMA2_CH15 35
+#define IMX8ULP_CLK_DMA2_CH16 36
+#define IMX8ULP_CLK_DMA2_CH17 37
+#define IMX8ULP_CLK_DMA2_CH18 38
+#define IMX8ULP_CLK_DMA2_CH19 39
+#define IMX8ULP_CLK_DMA2_CH20 40
+#define IMX8ULP_CLK_DMA2_CH21 41
+#define IMX8ULP_CLK_DMA2_CH22 42
+#define IMX8ULP_CLK_DMA2_CH23 43
+#define IMX8ULP_CLK_DMA2_CH24 44
+#define IMX8ULP_CLK_DMA2_CH25 45
+#define IMX8ULP_CLK_DMA2_CH26 46
+#define IMX8ULP_CLK_DMA2_CH27 47
+#define IMX8ULP_CLK_DMA2_CH28 48
+#define IMX8ULP_CLK_DMA2_CH29 49
+#define IMX8ULP_CLK_DMA2_CH30 50
+#define IMX8ULP_CLK_DMA2_CH31 51
+#define IMX8ULP_CLK_MU2_B 52
+#define IMX8ULP_CLK_MU3_B 53
+#define IMX8ULP_CLK_AVD_SIM 54
+#define IMX8ULP_CLK_DSI_TX_ESC 55
+
+#define IMX8ULP_CLK_PCC5_END 56
+
+#endif
diff --git a/include/dt-bindings/clock/jz4725b-cgu.h b/include/dt-bindings/clock/ingenic,jz4725b-cgu.h
index 31f1ab0fe42c..31f1ab0fe42c 100644
--- a/include/dt-bindings/clock/jz4725b-cgu.h
+++ b/include/dt-bindings/clock/ingenic,jz4725b-cgu.h
diff --git a/include/dt-bindings/clock/jz4740-cgu.h b/include/dt-bindings/clock/ingenic,jz4740-cgu.h
index e82d77028581..e82d77028581 100644
--- a/include/dt-bindings/clock/jz4740-cgu.h
+++ b/include/dt-bindings/clock/ingenic,jz4740-cgu.h
diff --git a/include/dt-bindings/clock/jz4760-cgu.h b/include/dt-bindings/clock/ingenic,jz4760-cgu.h
index 4bb2e19c4743..4bb2e19c4743 100644
--- a/include/dt-bindings/clock/jz4760-cgu.h
+++ b/include/dt-bindings/clock/ingenic,jz4760-cgu.h
diff --git a/include/dt-bindings/clock/jz4770-cgu.h b/include/dt-bindings/clock/ingenic,jz4770-cgu.h
index d68a7695a1f8..d68a7695a1f8 100644
--- a/include/dt-bindings/clock/jz4770-cgu.h
+++ b/include/dt-bindings/clock/ingenic,jz4770-cgu.h
diff --git a/include/dt-bindings/clock/jz4780-cgu.h b/include/dt-bindings/clock/ingenic,jz4780-cgu.h
index 85cf8eb5081b..85cf8eb5081b 100644
--- a/include/dt-bindings/clock/jz4780-cgu.h
+++ b/include/dt-bindings/clock/ingenic,jz4780-cgu.h
diff --git a/include/dt-bindings/clock/x1000-cgu.h b/include/dt-bindings/clock/ingenic,x1000-cgu.h
index f187e0719fd3..f187e0719fd3 100644
--- a/include/dt-bindings/clock/x1000-cgu.h
+++ b/include/dt-bindings/clock/ingenic,x1000-cgu.h
diff --git a/include/dt-bindings/clock/x1830-cgu.h b/include/dt-bindings/clock/ingenic,x1830-cgu.h
index 88455376a950..88455376a950 100644
--- a/include/dt-bindings/clock/x1830-cgu.h
+++ b/include/dt-bindings/clock/ingenic,x1830-cgu.h
diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h
index f33781338eda..78aa07fd7cc0 100644
--- a/include/dt-bindings/clock/meson8b-clkc.h
+++ b/include/dt-bindings/clock/meson8b-clkc.h
@@ -105,6 +105,16 @@
#define CLKID_PERIPH 126
#define CLKID_AXI 128
#define CLKID_L2_DRAM 130
+#define CLKID_HDMI_PLL_HDMI_OUT 132
+#define CLKID_VID_PLL_FINAL_DIV 137
+#define CLKID_VCLK_IN_SEL 138
+#define CLKID_VCLK2_IN_SEL 149
+#define CLKID_CTS_ENCT 161
+#define CLKID_CTS_ENCP 163
+#define CLKID_CTS_ENCI 165
+#define CLKID_HDMI_TX_PIXEL 167
+#define CLKID_CTS_ENCL 169
+#define CLKID_CTS_VDAC0 171
#define CLKID_HDMI_SYS 174
#define CLKID_VPU 190
#define CLKID_VDEC_1 196
diff --git a/include/dt-bindings/clock/mt8195-clk.h b/include/dt-bindings/clock/mt8195-clk.h
new file mode 100644
index 000000000000..95cf812a0b37
--- /dev/null
+++ b/include/dt-bindings/clock/mt8195-clk.h
@@ -0,0 +1,864 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8195_H
+#define _DT_BINDINGS_CLK_MT8195_H
+
+/* TOPCKGEN */
+
+#define CLK_TOP_AXI 0
+#define CLK_TOP_SPM 1
+#define CLK_TOP_SCP 2
+#define CLK_TOP_BUS_AXIMEM 3
+#define CLK_TOP_VPP 4
+#define CLK_TOP_ETHDR 5
+#define CLK_TOP_IPE 6
+#define CLK_TOP_CAM 7
+#define CLK_TOP_CCU 8
+#define CLK_TOP_IMG 9
+#define CLK_TOP_CAMTM 10
+#define CLK_TOP_DSP 11
+#define CLK_TOP_DSP1 12
+#define CLK_TOP_DSP2 13
+#define CLK_TOP_DSP3 14
+#define CLK_TOP_DSP4 15
+#define CLK_TOP_DSP5 16
+#define CLK_TOP_DSP6 17
+#define CLK_TOP_DSP7 18
+#define CLK_TOP_IPU_IF 19
+#define CLK_TOP_MFG_CORE_TMP 20
+#define CLK_TOP_CAMTG 21
+#define CLK_TOP_CAMTG2 22
+#define CLK_TOP_CAMTG3 23
+#define CLK_TOP_CAMTG4 24
+#define CLK_TOP_CAMTG5 25
+#define CLK_TOP_UART 26
+#define CLK_TOP_SPI 27
+#define CLK_TOP_SPIS 28
+#define CLK_TOP_MSDC50_0_HCLK 29
+#define CLK_TOP_MSDC50_0 30
+#define CLK_TOP_MSDC30_1 31
+#define CLK_TOP_MSDC30_2 32
+#define CLK_TOP_INTDIR 33
+#define CLK_TOP_AUD_INTBUS 34
+#define CLK_TOP_AUDIO_H 35
+#define CLK_TOP_PWRAP_ULPOSC 36
+#define CLK_TOP_ATB 37
+#define CLK_TOP_PWRMCU 38
+#define CLK_TOP_DP 39
+#define CLK_TOP_EDP 40
+#define CLK_TOP_DPI 41
+#define CLK_TOP_DISP_PWM0 42
+#define CLK_TOP_DISP_PWM1 43
+#define CLK_TOP_USB_TOP 44
+#define CLK_TOP_SSUSB_XHCI 45
+#define CLK_TOP_USB_TOP_1P 46
+#define CLK_TOP_SSUSB_XHCI_1P 47
+#define CLK_TOP_USB_TOP_2P 48
+#define CLK_TOP_SSUSB_XHCI_2P 49
+#define CLK_TOP_USB_TOP_3P 50
+#define CLK_TOP_SSUSB_XHCI_3P 51
+#define CLK_TOP_I2C 52
+#define CLK_TOP_SENINF 53
+#define CLK_TOP_SENINF1 54
+#define CLK_TOP_SENINF2 55
+#define CLK_TOP_SENINF3 56
+#define CLK_TOP_GCPU 57
+#define CLK_TOP_DXCC 58
+#define CLK_TOP_DPMAIF_MAIN 59
+#define CLK_TOP_AES_UFSFDE 60
+#define CLK_TOP_UFS 61
+#define CLK_TOP_UFS_TICK1US 62
+#define CLK_TOP_UFS_MP_SAP_CFG 63
+#define CLK_TOP_VENC 64
+#define CLK_TOP_VDEC 65
+#define CLK_TOP_PWM 66
+#define CLK_TOP_MCUPM 67
+#define CLK_TOP_SPMI_P_MST 68
+#define CLK_TOP_SPMI_M_MST 69
+#define CLK_TOP_DVFSRC 70
+#define CLK_TOP_TL 71
+#define CLK_TOP_TL_P1 72
+#define CLK_TOP_AES_MSDCFDE 73
+#define CLK_TOP_DSI_OCC 74
+#define CLK_TOP_WPE_VPP 75
+#define CLK_TOP_HDCP 76
+#define CLK_TOP_HDCP_24M 77
+#define CLK_TOP_HD20_DACR_REF_CLK 78
+#define CLK_TOP_HD20_HDCP_CCLK 79
+#define CLK_TOP_HDMI_XTAL 80
+#define CLK_TOP_HDMI_APB 81
+#define CLK_TOP_SNPS_ETH_250M 82
+#define CLK_TOP_SNPS_ETH_62P4M_PTP 83
+#define CLK_TOP_SNPS_ETH_50M_RMII 84
+#define CLK_TOP_DGI_OUT 85
+#define CLK_TOP_NNA0 86
+#define CLK_TOP_NNA1 87
+#define CLK_TOP_ADSP 88
+#define CLK_TOP_ASM_H 89
+#define CLK_TOP_ASM_M 90
+#define CLK_TOP_ASM_L 91
+#define CLK_TOP_APLL1 92
+#define CLK_TOP_APLL2 93
+#define CLK_TOP_APLL3 94
+#define CLK_TOP_APLL4 95
+#define CLK_TOP_APLL5 96
+#define CLK_TOP_I2SO1_MCK 97
+#define CLK_TOP_I2SO2_MCK 98
+#define CLK_TOP_I2SI1_MCK 99
+#define CLK_TOP_I2SI2_MCK 100
+#define CLK_TOP_DPTX_MCK 101
+#define CLK_TOP_AUD_IEC_CLK 102
+#define CLK_TOP_A1SYS_HP 103
+#define CLK_TOP_A2SYS_HF 104
+#define CLK_TOP_A3SYS_HF 105
+#define CLK_TOP_A4SYS_HF 106
+#define CLK_TOP_SPINFI_BCLK 107
+#define CLK_TOP_NFI1X 108
+#define CLK_TOP_ECC 109
+#define CLK_TOP_AUDIO_LOCAL_BUS 110
+#define CLK_TOP_SPINOR 111
+#define CLK_TOP_DVIO_DGI_REF 112
+#define CLK_TOP_ULPOSC 113
+#define CLK_TOP_ULPOSC_CORE 114
+#define CLK_TOP_SRCK 115
+#define CLK_TOP_MFG_CK_FAST_REF 116
+#define CLK_TOP_CLK26M_D2 117
+#define CLK_TOP_CLK26M_D52 118
+#define CLK_TOP_IN_DGI 119
+#define CLK_TOP_IN_DGI_D2 120
+#define CLK_TOP_IN_DGI_D4 121
+#define CLK_TOP_IN_DGI_D6 122
+#define CLK_TOP_IN_DGI_D8 123
+#define CLK_TOP_MAINPLL_D3 124
+#define CLK_TOP_MAINPLL_D4 125
+#define CLK_TOP_MAINPLL_D4_D2 126
+#define CLK_TOP_MAINPLL_D4_D4 127
+#define CLK_TOP_MAINPLL_D4_D8 128
+#define CLK_TOP_MAINPLL_D5 129
+#define CLK_TOP_MAINPLL_D5_D2 130
+#define CLK_TOP_MAINPLL_D5_D4 131
+#define CLK_TOP_MAINPLL_D5_D8 132
+#define CLK_TOP_MAINPLL_D6 133
+#define CLK_TOP_MAINPLL_D6_D2 134
+#define CLK_TOP_MAINPLL_D6_D4 135
+#define CLK_TOP_MAINPLL_D6_D8 136
+#define CLK_TOP_MAINPLL_D7 137
+#define CLK_TOP_MAINPLL_D7_D2 138
+#define CLK_TOP_MAINPLL_D7_D4 139
+#define CLK_TOP_MAINPLL_D7_D8 140
+#define CLK_TOP_MAINPLL_D9 141
+#define CLK_TOP_UNIVPLL_D2 142
+#define CLK_TOP_UNIVPLL_D3 143
+#define CLK_TOP_UNIVPLL_D4 144
+#define CLK_TOP_UNIVPLL_D4_D2 145
+#define CLK_TOP_UNIVPLL_D4_D4 146
+#define CLK_TOP_UNIVPLL_D4_D8 147
+#define CLK_TOP_UNIVPLL_D5 148
+#define CLK_TOP_UNIVPLL_D5_D2 149
+#define CLK_TOP_UNIVPLL_D5_D4 150
+#define CLK_TOP_UNIVPLL_D5_D8 151
+#define CLK_TOP_UNIVPLL_D6 152
+#define CLK_TOP_UNIVPLL_D6_D2 153
+#define CLK_TOP_UNIVPLL_D6_D4 154
+#define CLK_TOP_UNIVPLL_D6_D8 155
+#define CLK_TOP_UNIVPLL_D6_D16 156
+#define CLK_TOP_UNIVPLL_D7 157
+#define CLK_TOP_UNIVPLL_192M 158
+#define CLK_TOP_UNIVPLL_192M_D4 159
+#define CLK_TOP_UNIVPLL_192M_D8 160
+#define CLK_TOP_UNIVPLL_192M_D16 161
+#define CLK_TOP_UNIVPLL_192M_D32 162
+#define CLK_TOP_APLL1_D3 163
+#define CLK_TOP_APLL1_D4 164
+#define CLK_TOP_APLL2_D3 165
+#define CLK_TOP_APLL2_D4 166
+#define CLK_TOP_APLL3_D4 167
+#define CLK_TOP_APLL4_D4 168
+#define CLK_TOP_APLL5_D4 169
+#define CLK_TOP_HDMIRX_APLL_D3 170
+#define CLK_TOP_HDMIRX_APLL_D4 171
+#define CLK_TOP_HDMIRX_APLL_D6 172
+#define CLK_TOP_MMPLL_D4 173
+#define CLK_TOP_MMPLL_D4_D2 174
+#define CLK_TOP_MMPLL_D4_D4 175
+#define CLK_TOP_MMPLL_D5 176
+#define CLK_TOP_MMPLL_D5_D2 177
+#define CLK_TOP_MMPLL_D5_D4 178
+#define CLK_TOP_MMPLL_D6 179
+#define CLK_TOP_MMPLL_D6_D2 180
+#define CLK_TOP_MMPLL_D7 181
+#define CLK_TOP_MMPLL_D9 182
+#define CLK_TOP_TVDPLL1_D2 183
+#define CLK_TOP_TVDPLL1_D4 184
+#define CLK_TOP_TVDPLL1_D8 185
+#define CLK_TOP_TVDPLL1_D16 186
+#define CLK_TOP_TVDPLL2_D2 187
+#define CLK_TOP_TVDPLL2_D4 188
+#define CLK_TOP_TVDPLL2_D8 189
+#define CLK_TOP_TVDPLL2_D16 190
+#define CLK_TOP_MSDCPLL_D2 191
+#define CLK_TOP_MSDCPLL_D4 192
+#define CLK_TOP_MSDCPLL_D16 193
+#define CLK_TOP_ETHPLL_D2 194
+#define CLK_TOP_ETHPLL_D8 195
+#define CLK_TOP_ETHPLL_D10 196
+#define CLK_TOP_DGIPLL_D2 197
+#define CLK_TOP_ULPOSC1 198
+#define CLK_TOP_ULPOSC1_D2 199
+#define CLK_TOP_ULPOSC1_D4 200
+#define CLK_TOP_ULPOSC1_D7 201
+#define CLK_TOP_ULPOSC1_D8 202
+#define CLK_TOP_ULPOSC1_D10 203
+#define CLK_TOP_ULPOSC1_D16 204
+#define CLK_TOP_ULPOSC2 205
+#define CLK_TOP_ADSPPLL_D2 206
+#define CLK_TOP_ADSPPLL_D4 207
+#define CLK_TOP_ADSPPLL_D8 208
+#define CLK_TOP_MEM_466M 209
+#define CLK_TOP_MPHONE_SLAVE_B 210
+#define CLK_TOP_PEXTP_PIPE 211
+#define CLK_TOP_UFS_RX_SYMBOL 212
+#define CLK_TOP_UFS_TX_SYMBOL 213
+#define CLK_TOP_SSUSB_U3PHY_P1_P_P0 214
+#define CLK_TOP_UFS_RX_SYMBOL1 215
+#define CLK_TOP_FPC 216
+#define CLK_TOP_HDMIRX_P 217
+#define CLK_TOP_APLL12_DIV0 218
+#define CLK_TOP_APLL12_DIV1 219
+#define CLK_TOP_APLL12_DIV2 220
+#define CLK_TOP_APLL12_DIV3 221
+#define CLK_TOP_APLL12_DIV4 222
+#define CLK_TOP_APLL12_DIV9 223
+#define CLK_TOP_CFG_VPP0 224
+#define CLK_TOP_CFG_VPP1 225
+#define CLK_TOP_CFG_VDO0 226
+#define CLK_TOP_CFG_VDO1 227
+#define CLK_TOP_CFG_UNIPLL_SES 228
+#define CLK_TOP_CFG_26M_VPP0 229
+#define CLK_TOP_CFG_26M_VPP1 230
+#define CLK_TOP_CFG_26M_AUD 231
+#define CLK_TOP_CFG_AXI_EAST 232
+#define CLK_TOP_CFG_AXI_EAST_NORTH 233
+#define CLK_TOP_CFG_AXI_NORTH 234
+#define CLK_TOP_CFG_AXI_SOUTH 235
+#define CLK_TOP_CFG_EXT_TEST 236
+#define CLK_TOP_SSUSB_REF 237
+#define CLK_TOP_SSUSB_PHY_REF 238
+#define CLK_TOP_SSUSB_P1_REF 239
+#define CLK_TOP_SSUSB_PHY_P1_REF 240
+#define CLK_TOP_SSUSB_P2_REF 241
+#define CLK_TOP_SSUSB_PHY_P2_REF 242
+#define CLK_TOP_SSUSB_P3_REF 243
+#define CLK_TOP_SSUSB_PHY_P3_REF 244
+#define CLK_TOP_NR_CLK 245
+
+/* INFRACFG_AO */
+
+#define CLK_INFRA_AO_PMIC_TMR 0
+#define CLK_INFRA_AO_PMIC_AP 1
+#define CLK_INFRA_AO_PMIC_MD 2
+#define CLK_INFRA_AO_PMIC_CONN 3
+#define CLK_INFRA_AO_SEJ 4
+#define CLK_INFRA_AO_APXGPT 5
+#define CLK_INFRA_AO_GCE 6
+#define CLK_INFRA_AO_GCE2 7
+#define CLK_INFRA_AO_THERM 8
+#define CLK_INFRA_AO_PWM_H 9
+#define CLK_INFRA_AO_PWM1 10
+#define CLK_INFRA_AO_PWM2 11
+#define CLK_INFRA_AO_PWM3 12
+#define CLK_INFRA_AO_PWM4 13
+#define CLK_INFRA_AO_PWM 14
+#define CLK_INFRA_AO_UART0 15
+#define CLK_INFRA_AO_UART1 16
+#define CLK_INFRA_AO_UART2 17
+#define CLK_INFRA_AO_UART3 18
+#define CLK_INFRA_AO_UART4 19
+#define CLK_INFRA_AO_GCE_26M 20
+#define CLK_INFRA_AO_CQ_DMA_FPC 21
+#define CLK_INFRA_AO_UART5 22
+#define CLK_INFRA_AO_HDMI_26M 23
+#define CLK_INFRA_AO_SPI0 24
+#define CLK_INFRA_AO_MSDC0 25
+#define CLK_INFRA_AO_MSDC1 26
+#define CLK_INFRA_AO_CG1_MSDC2 27
+#define CLK_INFRA_AO_MSDC0_SRC 28
+#define CLK_INFRA_AO_TRNG 29
+#define CLK_INFRA_AO_AUXADC 30
+#define CLK_INFRA_AO_CPUM 31
+#define CLK_INFRA_AO_HDMI_32K 32
+#define CLK_INFRA_AO_CEC_66M_H 33
+#define CLK_INFRA_AO_IRRX 34
+#define CLK_INFRA_AO_PCIE_TL_26M 35
+#define CLK_INFRA_AO_MSDC1_SRC 36
+#define CLK_INFRA_AO_CEC_66M_B 37
+#define CLK_INFRA_AO_PCIE_TL_96M 38
+#define CLK_INFRA_AO_DEVICE_APC 39
+#define CLK_INFRA_AO_ECC_66M_H 40
+#define CLK_INFRA_AO_DEBUGSYS 41
+#define CLK_INFRA_AO_AUDIO 42
+#define CLK_INFRA_AO_PCIE_TL_32K 43
+#define CLK_INFRA_AO_DBG_TRACE 44
+#define CLK_INFRA_AO_DRAMC_F26M 45
+#define CLK_INFRA_AO_IRTX 46
+#define CLK_INFRA_AO_SSUSB 47
+#define CLK_INFRA_AO_DISP_PWM 48
+#define CLK_INFRA_AO_CLDMA_B 49
+#define CLK_INFRA_AO_AUDIO_26M_B 50
+#define CLK_INFRA_AO_SPI1 51
+#define CLK_INFRA_AO_SPI2 52
+#define CLK_INFRA_AO_SPI3 53
+#define CLK_INFRA_AO_UNIPRO_SYS 54
+#define CLK_INFRA_AO_UNIPRO_TICK 55
+#define CLK_INFRA_AO_UFS_MP_SAP_B 56
+#define CLK_INFRA_AO_PWRMCU 57
+#define CLK_INFRA_AO_PWRMCU_BUS_H 58
+#define CLK_INFRA_AO_APDMA_B 59
+#define CLK_INFRA_AO_SPI4 60
+#define CLK_INFRA_AO_SPI5 61
+#define CLK_INFRA_AO_CQ_DMA 62
+#define CLK_INFRA_AO_AES_UFSFDE 63
+#define CLK_INFRA_AO_AES 64
+#define CLK_INFRA_AO_UFS_TICK 65
+#define CLK_INFRA_AO_SSUSB_XHCI 66
+#define CLK_INFRA_AO_MSDC0_SELF 67
+#define CLK_INFRA_AO_MSDC1_SELF 68
+#define CLK_INFRA_AO_MSDC2_SELF 69
+#define CLK_INFRA_AO_I2S_DMA 70
+#define CLK_INFRA_AO_AP_MSDC0 71
+#define CLK_INFRA_AO_MD_MSDC0 72
+#define CLK_INFRA_AO_CG3_MSDC2 73
+#define CLK_INFRA_AO_GCPU 74
+#define CLK_INFRA_AO_PCIE_PERI_26M 75
+#define CLK_INFRA_AO_GCPU_66M_B 76
+#define CLK_INFRA_AO_GCPU_133M_B 77
+#define CLK_INFRA_AO_DISP_PWM1 78
+#define CLK_INFRA_AO_FBIST2FPC 79
+#define CLK_INFRA_AO_DEVICE_APC_SYNC 80
+#define CLK_INFRA_AO_PCIE_P1_PERI_26M 81
+#define CLK_INFRA_AO_SPIS0 82
+#define CLK_INFRA_AO_SPIS1 83
+#define CLK_INFRA_AO_133M_M_PERI 84
+#define CLK_INFRA_AO_66M_M_PERI 85
+#define CLK_INFRA_AO_PCIE_PL_P_250M_P0 86
+#define CLK_INFRA_AO_PCIE_PL_P_250M_P1 87
+#define CLK_INFRA_AO_PCIE_P1_TL_96M 88
+#define CLK_INFRA_AO_AES_MSDCFDE_0P 89
+#define CLK_INFRA_AO_UFS_TX_SYMBOL 90
+#define CLK_INFRA_AO_UFS_RX_SYMBOL 91
+#define CLK_INFRA_AO_UFS_RX_SYMBOL1 92
+#define CLK_INFRA_AO_PERI_UFS_MEM_SUB 93
+#define CLK_INFRA_AO_NR_CLK 94
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_NNAPLL 0
+#define CLK_APMIXED_RESPLL 1
+#define CLK_APMIXED_ETHPLL 2
+#define CLK_APMIXED_MSDCPLL 3
+#define CLK_APMIXED_TVDPLL1 4
+#define CLK_APMIXED_TVDPLL2 5
+#define CLK_APMIXED_MMPLL 6
+#define CLK_APMIXED_MAINPLL 7
+#define CLK_APMIXED_VDECPLL 8
+#define CLK_APMIXED_IMGPLL 9
+#define CLK_APMIXED_UNIVPLL 10
+#define CLK_APMIXED_HDMIPLL1 11
+#define CLK_APMIXED_HDMIPLL2 12
+#define CLK_APMIXED_HDMIRX_APLL 13
+#define CLK_APMIXED_USB1PLL 14
+#define CLK_APMIXED_ADSPPLL 15
+#define CLK_APMIXED_APLL1 16
+#define CLK_APMIXED_APLL2 17
+#define CLK_APMIXED_APLL3 18
+#define CLK_APMIXED_APLL4 19
+#define CLK_APMIXED_APLL5 20
+#define CLK_APMIXED_MFGPLL 21
+#define CLK_APMIXED_DGIPLL 22
+#define CLK_APMIXED_PLL_SSUSB26M 23
+#define CLK_APMIXED_NR_CLK 24
+
+/* SCP_ADSP */
+
+#define CLK_SCP_ADSP_AUDIODSP 0
+#define CLK_SCP_ADSP_NR_CLK 1
+
+/* PERICFG_AO */
+
+#define CLK_PERI_AO_ETHERNET 0
+#define CLK_PERI_AO_ETHERNET_BUS 1
+#define CLK_PERI_AO_FLASHIF_BUS 2
+#define CLK_PERI_AO_FLASHIF_FLASH 3
+#define CLK_PERI_AO_SSUSB_1P_BUS 4
+#define CLK_PERI_AO_SSUSB_1P_XHCI 5
+#define CLK_PERI_AO_SSUSB_2P_BUS 6
+#define CLK_PERI_AO_SSUSB_2P_XHCI 7
+#define CLK_PERI_AO_SSUSB_3P_BUS 8
+#define CLK_PERI_AO_SSUSB_3P_XHCI 9
+#define CLK_PERI_AO_SPINFI 10
+#define CLK_PERI_AO_ETHERNET_MAC 11
+#define CLK_PERI_AO_NFI_H 12
+#define CLK_PERI_AO_FNFI1X 13
+#define CLK_PERI_AO_PCIE_P0_MEM 14
+#define CLK_PERI_AO_PCIE_P1_MEM 15
+#define CLK_PERI_AO_NR_CLK 16
+
+/* IMP_IIC_WRAP_S */
+
+#define CLK_IMP_IIC_WRAP_S_I2C5 0
+#define CLK_IMP_IIC_WRAP_S_I2C6 1
+#define CLK_IMP_IIC_WRAP_S_I2C7 2
+#define CLK_IMP_IIC_WRAP_S_NR_CLK 3
+
+/* IMP_IIC_WRAP_W */
+
+#define CLK_IMP_IIC_WRAP_W_I2C0 0
+#define CLK_IMP_IIC_WRAP_W_I2C1 1
+#define CLK_IMP_IIC_WRAP_W_I2C2 2
+#define CLK_IMP_IIC_WRAP_W_I2C3 3
+#define CLK_IMP_IIC_WRAP_W_I2C4 4
+#define CLK_IMP_IIC_WRAP_W_NR_CLK 5
+
+/* MFGCFG */
+
+#define CLK_MFG_BG3D 0
+#define CLK_MFG_NR_CLK 1
+
+/* VPPSYS0 */
+
+#define CLK_VPP0_MDP_FG 0
+#define CLK_VPP0_STITCH 1
+#define CLK_VPP0_PADDING 2
+#define CLK_VPP0_MDP_TCC 3
+#define CLK_VPP0_WARP0_ASYNC_TX 4
+#define CLK_VPP0_WARP1_ASYNC_TX 5
+#define CLK_VPP0_MUTEX 6
+#define CLK_VPP0_VPP02VPP1_RELAY 7
+#define CLK_VPP0_VPP12VPP0_ASYNC 8
+#define CLK_VPP0_MMSYSRAM_TOP 9
+#define CLK_VPP0_MDP_AAL 10
+#define CLK_VPP0_MDP_RSZ 11
+#define CLK_VPP0_SMI_COMMON 12
+#define CLK_VPP0_GALS_VDO0_LARB0 13
+#define CLK_VPP0_GALS_VDO0_LARB1 14
+#define CLK_VPP0_GALS_VENCSYS 15
+#define CLK_VPP0_GALS_VENCSYS_CORE1 16
+#define CLK_VPP0_GALS_INFRA 17
+#define CLK_VPP0_GALS_CAMSYS 18
+#define CLK_VPP0_GALS_VPP1_LARB5 19
+#define CLK_VPP0_GALS_VPP1_LARB6 20
+#define CLK_VPP0_SMI_REORDER 21
+#define CLK_VPP0_SMI_IOMMU 22
+#define CLK_VPP0_GALS_IMGSYS_CAMSYS 23
+#define CLK_VPP0_MDP_RDMA 24
+#define CLK_VPP0_MDP_WROT 25
+#define CLK_VPP0_GALS_EMI0_EMI1 26
+#define CLK_VPP0_SMI_SUB_COMMON_REORDER 27
+#define CLK_VPP0_SMI_RSI 28
+#define CLK_VPP0_SMI_COMMON_LARB4 29
+#define CLK_VPP0_GALS_VDEC_VDEC_CORE1 30
+#define CLK_VPP0_GALS_VPP1_WPE 31
+#define CLK_VPP0_GALS_VDO0_VDO1_VENCSYS_CORE1 32
+#define CLK_VPP0_FAKE_ENG 33
+#define CLK_VPP0_MDP_HDR 34
+#define CLK_VPP0_MDP_TDSHP 35
+#define CLK_VPP0_MDP_COLOR 36
+#define CLK_VPP0_MDP_OVL 37
+#define CLK_VPP0_WARP0_RELAY 38
+#define CLK_VPP0_WARP0_MDP_DL_ASYNC 39
+#define CLK_VPP0_WARP1_RELAY 40
+#define CLK_VPP0_WARP1_MDP_DL_ASYNC 41
+#define CLK_VPP0_NR_CLK 42
+
+/* WPESYS */
+
+#define CLK_WPE_VPP0 0
+#define CLK_WPE_VPP1 1
+#define CLK_WPE_SMI_LARB7 2
+#define CLK_WPE_SMI_LARB8 3
+#define CLK_WPE_EVENT_TX 4
+#define CLK_WPE_SMI_LARB7_P 5
+#define CLK_WPE_SMI_LARB8_P 6
+#define CLK_WPE_NR_CLK 7
+
+/* WPESYS_VPP0 */
+
+#define CLK_WPE_VPP0_VECI 0
+#define CLK_WPE_VPP0_VEC2I 1
+#define CLK_WPE_VPP0_VEC3I 2
+#define CLK_WPE_VPP0_WPEO 3
+#define CLK_WPE_VPP0_MSKO 4
+#define CLK_WPE_VPP0_VGEN 5
+#define CLK_WPE_VPP0_EXT 6
+#define CLK_WPE_VPP0_VFC 7
+#define CLK_WPE_VPP0_CACH0_TOP 8
+#define CLK_WPE_VPP0_CACH0_DMA 9
+#define CLK_WPE_VPP0_CACH1_TOP 10
+#define CLK_WPE_VPP0_CACH1_DMA 11
+#define CLK_WPE_VPP0_CACH2_TOP 12
+#define CLK_WPE_VPP0_CACH2_DMA 13
+#define CLK_WPE_VPP0_CACH3_TOP 14
+#define CLK_WPE_VPP0_CACH3_DMA 15
+#define CLK_WPE_VPP0_PSP 16
+#define CLK_WPE_VPP0_PSP2 17
+#define CLK_WPE_VPP0_SYNC 18
+#define CLK_WPE_VPP0_C24 19
+#define CLK_WPE_VPP0_MDP_CROP 20
+#define CLK_WPE_VPP0_ISP_CROP 21
+#define CLK_WPE_VPP0_TOP 22
+#define CLK_WPE_VPP0_NR_CLK 23
+
+/* WPESYS_VPP1 */
+
+#define CLK_WPE_VPP1_VECI 0
+#define CLK_WPE_VPP1_VEC2I 1
+#define CLK_WPE_VPP1_VEC3I 2
+#define CLK_WPE_VPP1_WPEO 3
+#define CLK_WPE_VPP1_MSKO 4
+#define CLK_WPE_VPP1_VGEN 5
+#define CLK_WPE_VPP1_EXT 6
+#define CLK_WPE_VPP1_VFC 7
+#define CLK_WPE_VPP1_CACH0_TOP 8
+#define CLK_WPE_VPP1_CACH0_DMA 9
+#define CLK_WPE_VPP1_CACH1_TOP 10
+#define CLK_WPE_VPP1_CACH1_DMA 11
+#define CLK_WPE_VPP1_CACH2_TOP 12
+#define CLK_WPE_VPP1_CACH2_DMA 13
+#define CLK_WPE_VPP1_CACH3_TOP 14
+#define CLK_WPE_VPP1_CACH3_DMA 15
+#define CLK_WPE_VPP1_PSP 16
+#define CLK_WPE_VPP1_PSP2 17
+#define CLK_WPE_VPP1_SYNC 18
+#define CLK_WPE_VPP1_C24 19
+#define CLK_WPE_VPP1_MDP_CROP 20
+#define CLK_WPE_VPP1_ISP_CROP 21
+#define CLK_WPE_VPP1_TOP 22
+#define CLK_WPE_VPP1_NR_CLK 23
+
+/* VPPSYS1 */
+
+#define CLK_VPP1_SVPP1_MDP_OVL 0
+#define CLK_VPP1_SVPP1_MDP_TCC 1
+#define CLK_VPP1_SVPP1_MDP_WROT 2
+#define CLK_VPP1_SVPP1_VPP_PAD 3
+#define CLK_VPP1_SVPP2_MDP_WROT 4
+#define CLK_VPP1_SVPP2_VPP_PAD 5
+#define CLK_VPP1_SVPP3_MDP_WROT 6
+#define CLK_VPP1_SVPP3_VPP_PAD 7
+#define CLK_VPP1_SVPP1_MDP_RDMA 8
+#define CLK_VPP1_SVPP1_MDP_FG 9
+#define CLK_VPP1_SVPP2_MDP_RDMA 10
+#define CLK_VPP1_SVPP2_MDP_FG 11
+#define CLK_VPP1_SVPP3_MDP_RDMA 12
+#define CLK_VPP1_SVPP3_MDP_FG 13
+#define CLK_VPP1_VPP_SPLIT 14
+#define CLK_VPP1_SVPP2_VDO0_DL_RELAY 15
+#define CLK_VPP1_SVPP1_MDP_TDSHP 16
+#define CLK_VPP1_SVPP1_MDP_COLOR 17
+#define CLK_VPP1_SVPP3_VDO1_DL_RELAY 18
+#define CLK_VPP1_SVPP2_VPP_MERGE 19
+#define CLK_VPP1_SVPP2_MDP_COLOR 20
+#define CLK_VPP1_VPPSYS1_GALS 21
+#define CLK_VPP1_SVPP3_VPP_MERGE 22
+#define CLK_VPP1_SVPP3_MDP_COLOR 23
+#define CLK_VPP1_VPPSYS1_LARB 24
+#define CLK_VPP1_SVPP1_MDP_RSZ 25
+#define CLK_VPP1_SVPP1_MDP_HDR 26
+#define CLK_VPP1_SVPP1_MDP_AAL 27
+#define CLK_VPP1_SVPP2_MDP_HDR 28
+#define CLK_VPP1_SVPP2_MDP_AAL 29
+#define CLK_VPP1_DL_ASYNC 30
+#define CLK_VPP1_LARB5_FAKE_ENG 31
+#define CLK_VPP1_SVPP3_MDP_HDR 32
+#define CLK_VPP1_SVPP3_MDP_AAL 33
+#define CLK_VPP1_SVPP2_VDO1_DL_RELAY 34
+#define CLK_VPP1_LARB6_FAKE_ENG 35
+#define CLK_VPP1_SVPP2_MDP_RSZ 36
+#define CLK_VPP1_SVPP3_MDP_RSZ 37
+#define CLK_VPP1_SVPP3_VDO0_DL_RELAY 38
+#define CLK_VPP1_DISP_MUTEX 39
+#define CLK_VPP1_SVPP2_MDP_TDSHP 40
+#define CLK_VPP1_SVPP3_MDP_TDSHP 41
+#define CLK_VPP1_VPP0_DL1_RELAY 42
+#define CLK_VPP1_HDMI_META 43
+#define CLK_VPP1_VPP_SPLIT_HDMI 44
+#define CLK_VPP1_DGI_IN 45
+#define CLK_VPP1_DGI_OUT 46
+#define CLK_VPP1_VPP_SPLIT_DGI 47
+#define CLK_VPP1_VPP0_DL_ASYNC 48
+#define CLK_VPP1_VPP0_DL_RELAY 49
+#define CLK_VPP1_VPP_SPLIT_26M 50
+#define CLK_VPP1_NR_CLK 51
+
+/* IMGSYS */
+
+#define CLK_IMG_LARB9 0
+#define CLK_IMG_TRAW0 1
+#define CLK_IMG_TRAW1 2
+#define CLK_IMG_TRAW2 3
+#define CLK_IMG_TRAW3 4
+#define CLK_IMG_DIP0 5
+#define CLK_IMG_WPE0 6
+#define CLK_IMG_IPE 7
+#define CLK_IMG_DIP1 8
+#define CLK_IMG_WPE1 9
+#define CLK_IMG_GALS 10
+#define CLK_IMG_NR_CLK 11
+
+/* IMGSYS1_DIP_TOP */
+
+#define CLK_IMG1_DIP_TOP_LARB10 0
+#define CLK_IMG1_DIP_TOP_DIP_TOP 1
+#define CLK_IMG1_DIP_TOP_NR_CLK 2
+
+/* IMGSYS1_DIP_NR */
+
+#define CLK_IMG1_DIP_NR_RESERVE 0
+#define CLK_IMG1_DIP_NR_DIP_NR 1
+#define CLK_IMG1_DIP_NR_NR_CLK 2
+
+/* IMGSYS1_WPE */
+
+#define CLK_IMG1_WPE_LARB11 0
+#define CLK_IMG1_WPE_WPE 1
+#define CLK_IMG1_WPE_NR_CLK 2
+
+/* IPESYS */
+
+#define CLK_IPE_DPE 0
+#define CLK_IPE_FDVT 1
+#define CLK_IPE_ME 2
+#define CLK_IPE_TOP 3
+#define CLK_IPE_SMI_LARB12 4
+#define CLK_IPE_NR_CLK 5
+
+/* CAMSYS */
+
+#define CLK_CAM_LARB13 0
+#define CLK_CAM_LARB14 1
+#define CLK_CAM_MAIN_CAM 2
+#define CLK_CAM_MAIN_CAMTG 3
+#define CLK_CAM_SENINF 4
+#define CLK_CAM_GCAMSVA 5
+#define CLK_CAM_GCAMSVB 6
+#define CLK_CAM_GCAMSVC 7
+#define CLK_CAM_SCAMSA 8
+#define CLK_CAM_SCAMSB 9
+#define CLK_CAM_CAMSV_TOP 10
+#define CLK_CAM_CAMSV_CQ 11
+#define CLK_CAM_ADL 12
+#define CLK_CAM_ASG 13
+#define CLK_CAM_PDA 14
+#define CLK_CAM_FAKE_ENG 15
+#define CLK_CAM_MAIN_MRAW0 16
+#define CLK_CAM_MAIN_MRAW1 17
+#define CLK_CAM_MAIN_MRAW2 18
+#define CLK_CAM_MAIN_MRAW3 19
+#define CLK_CAM_CAM2MM0_GALS 20
+#define CLK_CAM_CAM2MM1_GALS 21
+#define CLK_CAM_CAM2SYS_GALS 22
+#define CLK_CAM_NR_CLK 23
+
+/* CAMSYS_RAWA */
+
+#define CLK_CAM_RAWA_LARBX 0
+#define CLK_CAM_RAWA_CAM 1
+#define CLK_CAM_RAWA_CAMTG 2
+#define CLK_CAM_RAWA_NR_CLK 3
+
+/* CAMSYS_YUVA */
+
+#define CLK_CAM_YUVA_LARBX 0
+#define CLK_CAM_YUVA_CAM 1
+#define CLK_CAM_YUVA_CAMTG 2
+#define CLK_CAM_YUVA_NR_CLK 3
+
+/* CAMSYS_RAWB */
+
+#define CLK_CAM_RAWB_LARBX 0
+#define CLK_CAM_RAWB_CAM 1
+#define CLK_CAM_RAWB_CAMTG 2
+#define CLK_CAM_RAWB_NR_CLK 3
+
+/* CAMSYS_YUVB */
+
+#define CLK_CAM_YUVB_LARBX 0
+#define CLK_CAM_YUVB_CAM 1
+#define CLK_CAM_YUVB_CAMTG 2
+#define CLK_CAM_YUVB_NR_CLK 3
+
+/* CAMSYS_MRAW */
+
+#define CLK_CAM_MRAW_LARBX 0
+#define CLK_CAM_MRAW_CAMTG 1
+#define CLK_CAM_MRAW_MRAW0 2
+#define CLK_CAM_MRAW_MRAW1 3
+#define CLK_CAM_MRAW_MRAW2 4
+#define CLK_CAM_MRAW_MRAW3 5
+#define CLK_CAM_MRAW_NR_CLK 6
+
+/* CCUSYS */
+
+#define CLK_CCU_LARB18 0
+#define CLK_CCU_AHB 1
+#define CLK_CCU_CCU0 2
+#define CLK_CCU_CCU1 3
+#define CLK_CCU_NR_CLK 4
+
+/* VDECSYS_SOC */
+
+#define CLK_VDEC_SOC_LARB1 0
+#define CLK_VDEC_SOC_LAT 1
+#define CLK_VDEC_SOC_VDEC 2
+#define CLK_VDEC_SOC_NR_CLK 3
+
+/* VDECSYS */
+
+#define CLK_VDEC_LARB1 0
+#define CLK_VDEC_LAT 1
+#define CLK_VDEC_VDEC 2
+#define CLK_VDEC_NR_CLK 3
+
+/* VDECSYS_CORE1 */
+
+#define CLK_VDEC_CORE1_LARB1 0
+#define CLK_VDEC_CORE1_LAT 1
+#define CLK_VDEC_CORE1_VDEC 2
+#define CLK_VDEC_CORE1_NR_CLK 3
+
+/* APUSYS_PLL */
+
+#define CLK_APUSYS_PLL_APUPLL 0
+#define CLK_APUSYS_PLL_NPUPLL 1
+#define CLK_APUSYS_PLL_APUPLL1 2
+#define CLK_APUSYS_PLL_APUPLL2 3
+#define CLK_APUSYS_PLL_NR_CLK 4
+
+/* VENCSYS */
+
+#define CLK_VENC_LARB 0
+#define CLK_VENC_VENC 1
+#define CLK_VENC_JPGENC 2
+#define CLK_VENC_JPGDEC 3
+#define CLK_VENC_JPGDEC_C1 4
+#define CLK_VENC_GALS 5
+#define CLK_VENC_NR_CLK 6
+
+/* VENCSYS_CORE1 */
+
+#define CLK_VENC_CORE1_LARB 0
+#define CLK_VENC_CORE1_VENC 1
+#define CLK_VENC_CORE1_JPGENC 2
+#define CLK_VENC_CORE1_JPGDEC 3
+#define CLK_VENC_CORE1_JPGDEC_C1 4
+#define CLK_VENC_CORE1_GALS 5
+#define CLK_VENC_CORE1_NR_CLK 6
+
+/* VDOSYS0 */
+
+#define CLK_VDO0_DISP_OVL0 0
+#define CLK_VDO0_DISP_COLOR0 1
+#define CLK_VDO0_DISP_COLOR1 2
+#define CLK_VDO0_DISP_CCORR0 3
+#define CLK_VDO0_DISP_CCORR1 4
+#define CLK_VDO0_DISP_AAL0 5
+#define CLK_VDO0_DISP_AAL1 6
+#define CLK_VDO0_DISP_GAMMA0 7
+#define CLK_VDO0_DISP_GAMMA1 8
+#define CLK_VDO0_DISP_DITHER0 9
+#define CLK_VDO0_DISP_DITHER1 10
+#define CLK_VDO0_DISP_OVL1 11
+#define CLK_VDO0_DISP_WDMA0 12
+#define CLK_VDO0_DISP_WDMA1 13
+#define CLK_VDO0_DISP_RDMA0 14
+#define CLK_VDO0_DISP_RDMA1 15
+#define CLK_VDO0_DSI0 16
+#define CLK_VDO0_DSI1 17
+#define CLK_VDO0_DSC_WRAP0 18
+#define CLK_VDO0_VPP_MERGE0 19
+#define CLK_VDO0_DP_INTF0 20
+#define CLK_VDO0_DISP_MUTEX0 21
+#define CLK_VDO0_DISP_IL_ROT0 22
+#define CLK_VDO0_APB_BUS 23
+#define CLK_VDO0_FAKE_ENG0 24
+#define CLK_VDO0_FAKE_ENG1 25
+#define CLK_VDO0_DL_ASYNC0 26
+#define CLK_VDO0_DL_ASYNC1 27
+#define CLK_VDO0_DL_ASYNC2 28
+#define CLK_VDO0_DL_ASYNC3 29
+#define CLK_VDO0_DL_ASYNC4 30
+#define CLK_VDO0_DISP_MONITOR0 31
+#define CLK_VDO0_DISP_MONITOR1 32
+#define CLK_VDO0_DISP_MONITOR2 33
+#define CLK_VDO0_DISP_MONITOR3 34
+#define CLK_VDO0_DISP_MONITOR4 35
+#define CLK_VDO0_SMI_GALS 36
+#define CLK_VDO0_SMI_COMMON 37
+#define CLK_VDO0_SMI_EMI 38
+#define CLK_VDO0_SMI_IOMMU 39
+#define CLK_VDO0_SMI_LARB 40
+#define CLK_VDO0_SMI_RSI 41
+#define CLK_VDO0_DSI0_DSI 42
+#define CLK_VDO0_DSI1_DSI 43
+#define CLK_VDO0_DP_INTF0_DP_INTF 44
+#define CLK_VDO0_NR_CLK 45
+
+/* VDOSYS1 */
+
+#define CLK_VDO1_SMI_LARB2 0
+#define CLK_VDO1_SMI_LARB3 1
+#define CLK_VDO1_GALS 2
+#define CLK_VDO1_FAKE_ENG0 3
+#define CLK_VDO1_FAKE_ENG 4
+#define CLK_VDO1_MDP_RDMA0 5
+#define CLK_VDO1_MDP_RDMA1 6
+#define CLK_VDO1_MDP_RDMA2 7
+#define CLK_VDO1_MDP_RDMA3 8
+#define CLK_VDO1_VPP_MERGE0 9
+#define CLK_VDO1_VPP_MERGE1 10
+#define CLK_VDO1_VPP_MERGE2 11
+#define CLK_VDO1_VPP_MERGE3 12
+#define CLK_VDO1_VPP_MERGE4 13
+#define CLK_VDO1_VPP2_TO_VDO1_DL_ASYNC 14
+#define CLK_VDO1_VPP3_TO_VDO1_DL_ASYNC 15
+#define CLK_VDO1_DISP_MUTEX 16
+#define CLK_VDO1_MDP_RDMA4 17
+#define CLK_VDO1_MDP_RDMA5 18
+#define CLK_VDO1_MDP_RDMA6 19
+#define CLK_VDO1_MDP_RDMA7 20
+#define CLK_VDO1_DP_INTF0_MM 21
+#define CLK_VDO1_DPI0_MM 22
+#define CLK_VDO1_DPI1_MM 23
+#define CLK_VDO1_DISP_MONITOR 24
+#define CLK_VDO1_MERGE0_DL_ASYNC 25
+#define CLK_VDO1_MERGE1_DL_ASYNC 26
+#define CLK_VDO1_MERGE2_DL_ASYNC 27
+#define CLK_VDO1_MERGE3_DL_ASYNC 28
+#define CLK_VDO1_MERGE4_DL_ASYNC 29
+#define CLK_VDO1_VDO0_DSC_TO_VDO1_DL_ASYNC 30
+#define CLK_VDO1_VDO0_MERGE_TO_VDO1_DL_ASYNC 31
+#define CLK_VDO1_HDR_VDO_FE0 32
+#define CLK_VDO1_HDR_GFX_FE0 33
+#define CLK_VDO1_HDR_VDO_BE 34
+#define CLK_VDO1_HDR_VDO_FE1 35
+#define CLK_VDO1_HDR_GFX_FE1 36
+#define CLK_VDO1_DISP_MIXER 37
+#define CLK_VDO1_HDR_VDO_FE0_DL_ASYNC 38
+#define CLK_VDO1_HDR_VDO_FE1_DL_ASYNC 39
+#define CLK_VDO1_HDR_GFX_FE0_DL_ASYNC 40
+#define CLK_VDO1_HDR_GFX_FE1_DL_ASYNC 41
+#define CLK_VDO1_HDR_VDO_BE_DL_ASYNC 42
+#define CLK_VDO1_DPI0 43
+#define CLK_VDO1_DISP_MONITOR_DPI0 44
+#define CLK_VDO1_DPI1 45
+#define CLK_VDO1_DISP_MONITOR_DPI1 46
+#define CLK_VDO1_DPINTF 47
+#define CLK_VDO1_DISP_MONITOR_DPINTF 48
+#define CLK_VDO1_26M_SLOW 49
+#define CLK_VDO1_NR_CLK 50
+
+#endif /* _DT_BINDINGS_CLK_MT8195_H */
diff --git a/include/dt-bindings/clock/qcom,camcc-sc7280.h b/include/dt-bindings/clock/qcom,camcc-sc7280.h
new file mode 100644
index 000000000000..56640f407309
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,camcc-sc7280.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SC7280_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SC7280_H
+
+/* CAM_CC clocks */
+#define CAM_CC_PLL0 0
+#define CAM_CC_PLL0_OUT_EVEN 1
+#define CAM_CC_PLL0_OUT_ODD 2
+#define CAM_CC_PLL1 3
+#define CAM_CC_PLL1_OUT_EVEN 4
+#define CAM_CC_PLL2 5
+#define CAM_CC_PLL2_OUT_AUX 6
+#define CAM_CC_PLL2_OUT_AUX2 7
+#define CAM_CC_PLL3 8
+#define CAM_CC_PLL3_OUT_EVEN 9
+#define CAM_CC_PLL4 10
+#define CAM_CC_PLL4_OUT_EVEN 11
+#define CAM_CC_PLL5 12
+#define CAM_CC_PLL5_OUT_EVEN 13
+#define CAM_CC_PLL6 14
+#define CAM_CC_PLL6_OUT_EVEN 15
+#define CAM_CC_PLL6_OUT_ODD 16
+#define CAM_CC_BPS_AHB_CLK 17
+#define CAM_CC_BPS_AREG_CLK 18
+#define CAM_CC_BPS_AXI_CLK 19
+#define CAM_CC_BPS_CLK 20
+#define CAM_CC_BPS_CLK_SRC 21
+#define CAM_CC_CAMNOC_AXI_CLK 22
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 23
+#define CAM_CC_CAMNOC_DCD_XO_CLK 24
+#define CAM_CC_CCI_0_CLK 25
+#define CAM_CC_CCI_0_CLK_SRC 26
+#define CAM_CC_CCI_1_CLK 27
+#define CAM_CC_CCI_1_CLK_SRC 28
+#define CAM_CC_CORE_AHB_CLK 29
+#define CAM_CC_CPAS_AHB_CLK 30
+#define CAM_CC_CPHY_RX_CLK_SRC 31
+#define CAM_CC_CSI0PHYTIMER_CLK 32
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 33
+#define CAM_CC_CSI1PHYTIMER_CLK 34
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 35
+#define CAM_CC_CSI2PHYTIMER_CLK 36
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 37
+#define CAM_CC_CSI3PHYTIMER_CLK 38
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 39
+#define CAM_CC_CSI4PHYTIMER_CLK 40
+#define CAM_CC_CSI4PHYTIMER_CLK_SRC 41
+#define CAM_CC_CSIPHY0_CLK 42
+#define CAM_CC_CSIPHY1_CLK 43
+#define CAM_CC_CSIPHY2_CLK 44
+#define CAM_CC_CSIPHY3_CLK 45
+#define CAM_CC_CSIPHY4_CLK 46
+#define CAM_CC_FAST_AHB_CLK_SRC 47
+#define CAM_CC_GDSC_CLK 48
+#define CAM_CC_ICP_AHB_CLK 49
+#define CAM_CC_ICP_CLK 50
+#define CAM_CC_ICP_CLK_SRC 51
+#define CAM_CC_IFE_0_AXI_CLK 52
+#define CAM_CC_IFE_0_CLK 53
+#define CAM_CC_IFE_0_CLK_SRC 54
+#define CAM_CC_IFE_0_CPHY_RX_CLK 55
+#define CAM_CC_IFE_0_CSID_CLK 56
+#define CAM_CC_IFE_0_CSID_CLK_SRC 57
+#define CAM_CC_IFE_0_DSP_CLK 58
+#define CAM_CC_IFE_1_AXI_CLK 59
+#define CAM_CC_IFE_1_CLK 60
+#define CAM_CC_IFE_1_CLK_SRC 61
+#define CAM_CC_IFE_1_CPHY_RX_CLK 62
+#define CAM_CC_IFE_1_CSID_CLK 63
+#define CAM_CC_IFE_1_CSID_CLK_SRC 64
+#define CAM_CC_IFE_1_DSP_CLK 65
+#define CAM_CC_IFE_2_AXI_CLK 66
+#define CAM_CC_IFE_2_CLK 67
+#define CAM_CC_IFE_2_CLK_SRC 68
+#define CAM_CC_IFE_2_CPHY_RX_CLK 69
+#define CAM_CC_IFE_2_CSID_CLK 70
+#define CAM_CC_IFE_2_CSID_CLK_SRC 71
+#define CAM_CC_IFE_2_DSP_CLK 72
+#define CAM_CC_IFE_LITE_0_CLK 73
+#define CAM_CC_IFE_LITE_0_CLK_SRC 74
+#define CAM_CC_IFE_LITE_0_CPHY_RX_CLK 75
+#define CAM_CC_IFE_LITE_0_CSID_CLK 76
+#define CAM_CC_IFE_LITE_0_CSID_CLK_SRC 77
+#define CAM_CC_IFE_LITE_1_CLK 78
+#define CAM_CC_IFE_LITE_1_CLK_SRC 79
+#define CAM_CC_IFE_LITE_1_CPHY_RX_CLK 80
+#define CAM_CC_IFE_LITE_1_CSID_CLK 81
+#define CAM_CC_IFE_LITE_1_CSID_CLK_SRC 82
+#define CAM_CC_IPE_0_AHB_CLK 83
+#define CAM_CC_IPE_0_AREG_CLK 84
+#define CAM_CC_IPE_0_AXI_CLK 85
+#define CAM_CC_IPE_0_CLK 86
+#define CAM_CC_IPE_0_CLK_SRC 87
+#define CAM_CC_JPEG_CLK 88
+#define CAM_CC_JPEG_CLK_SRC 89
+#define CAM_CC_LRME_CLK 90
+#define CAM_CC_LRME_CLK_SRC 91
+#define CAM_CC_MCLK0_CLK 92
+#define CAM_CC_MCLK0_CLK_SRC 93
+#define CAM_CC_MCLK1_CLK 94
+#define CAM_CC_MCLK1_CLK_SRC 95
+#define CAM_CC_MCLK2_CLK 96
+#define CAM_CC_MCLK2_CLK_SRC 97
+#define CAM_CC_MCLK3_CLK 98
+#define CAM_CC_MCLK3_CLK_SRC 99
+#define CAM_CC_MCLK4_CLK 100
+#define CAM_CC_MCLK4_CLK_SRC 101
+#define CAM_CC_MCLK5_CLK 102
+#define CAM_CC_MCLK5_CLK_SRC 103
+#define CAM_CC_SLEEP_CLK 104
+#define CAM_CC_SLEEP_CLK_SRC 105
+#define CAM_CC_SLOW_AHB_CLK_SRC 106
+#define CAM_CC_XO_CLK_SRC 107
+
+/* CAM_CC power domains */
+#define CAM_CC_BPS_GDSC 0
+#define CAM_CC_IFE_0_GDSC 1
+#define CAM_CC_IFE_1_GDSC 2
+#define CAM_CC_IFE_2_GDSC 3
+#define CAM_CC_IPE_0_GDSC 4
+#define CAM_CC_TITAN_TOP_GDSC 5
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8994.h b/include/dt-bindings/clock/qcom,gcc-msm8994.h
index 507b8d6effd2..f6836f430bb5 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8994.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8994.h
@@ -148,6 +148,18 @@
#define GCC_USB30_SLEEP_CLK 138
#define GCC_USB_HS_AHB_CLK 139
#define GCC_USB_PHY_CFG_AHB2PHY_CLK 140
+#define CONFIG_NOC_CLK_SRC 141
+#define PERIPH_NOC_CLK_SRC 142
+#define SYSTEM_NOC_CLK_SRC 143
+#define GPLL0_OUT_MMSSCC 144
+#define GPLL0_OUT_MSSCC 145
+#define PCIE_0_PHY_LDO 146
+#define PCIE_1_PHY_LDO 147
+#define UFS_PHY_LDO 148
+#define USB_SS_PHY_LDO 149
+#define GCC_BOOT_ROM_AHB_CLK 150
+#define GCC_PRNG_AHB_CLK 151
+#define GCC_USB3_PHY_PIPE_CLK 152
/* GDSCs */
#define PCIE_GDSC 0
@@ -162,5 +174,6 @@
#define PCIE_PHY_0_RESET 2
#define PCIE_PHY_1_RESET 3
#define QUSB2_PHY_RESET 4
+#define MSS_RESET 5
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-qcm2290.h b/include/dt-bindings/clock/qcom,gcc-qcm2290.h
new file mode 100644
index 000000000000..8d907035f9e4
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-qcm2290.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_QCM2290_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_QCM2290_H
+
+/* GCC clocks */
+#define GPLL0 0
+#define GPLL0_OUT_AUX2 1
+#define GPLL1 2
+#define GPLL10 3
+#define GPLL11 4
+#define GPLL3 5
+#define GPLL3_OUT_MAIN 6
+#define GPLL4 7
+#define GPLL5 8
+#define GPLL6 9
+#define GPLL6_OUT_MAIN 10
+#define GPLL7 11
+#define GPLL8 12
+#define GPLL8_OUT_MAIN 13
+#define GPLL9 14
+#define GPLL9_OUT_MAIN 15
+#define GCC_AHB2PHY_CSI_CLK 16
+#define GCC_AHB2PHY_USB_CLK 17
+#define GCC_APC_VS_CLK 18
+#define GCC_BIMC_GPU_AXI_CLK 19
+#define GCC_BOOT_ROM_AHB_CLK 20
+#define GCC_CAM_THROTTLE_NRT_CLK 21
+#define GCC_CAM_THROTTLE_RT_CLK 22
+#define GCC_CAMERA_AHB_CLK 23
+#define GCC_CAMERA_XO_CLK 24
+#define GCC_CAMSS_AXI_CLK 25
+#define GCC_CAMSS_AXI_CLK_SRC 26
+#define GCC_CAMSS_CAMNOC_ATB_CLK 27
+#define GCC_CAMSS_CAMNOC_NTS_XO_CLK 28
+#define GCC_CAMSS_CCI_0_CLK 29
+#define GCC_CAMSS_CCI_CLK_SRC 30
+#define GCC_CAMSS_CPHY_0_CLK 31
+#define GCC_CAMSS_CPHY_1_CLK 32
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 33
+#define GCC_CAMSS_CSI0PHYTIMER_CLK_SRC 34
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 35
+#define GCC_CAMSS_CSI1PHYTIMER_CLK_SRC 36
+#define GCC_CAMSS_MCLK0_CLK 37
+#define GCC_CAMSS_MCLK0_CLK_SRC 38
+#define GCC_CAMSS_MCLK1_CLK 39
+#define GCC_CAMSS_MCLK1_CLK_SRC 40
+#define GCC_CAMSS_MCLK2_CLK 41
+#define GCC_CAMSS_MCLK2_CLK_SRC 42
+#define GCC_CAMSS_MCLK3_CLK 43
+#define GCC_CAMSS_MCLK3_CLK_SRC 44
+#define GCC_CAMSS_NRT_AXI_CLK 45
+#define GCC_CAMSS_OPE_AHB_CLK 46
+#define GCC_CAMSS_OPE_AHB_CLK_SRC 47
+#define GCC_CAMSS_OPE_CLK 48
+#define GCC_CAMSS_OPE_CLK_SRC 49
+#define GCC_CAMSS_RT_AXI_CLK 50
+#define GCC_CAMSS_TFE_0_CLK 51
+#define GCC_CAMSS_TFE_0_CLK_SRC 52
+#define GCC_CAMSS_TFE_0_CPHY_RX_CLK 53
+#define GCC_CAMSS_TFE_0_CSID_CLK 54
+#define GCC_CAMSS_TFE_0_CSID_CLK_SRC 55
+#define GCC_CAMSS_TFE_1_CLK 56
+#define GCC_CAMSS_TFE_1_CLK_SRC 57
+#define GCC_CAMSS_TFE_1_CPHY_RX_CLK 58
+#define GCC_CAMSS_TFE_1_CSID_CLK 59
+#define GCC_CAMSS_TFE_1_CSID_CLK_SRC 60
+#define GCC_CAMSS_TFE_CPHY_RX_CLK_SRC 61
+#define GCC_CAMSS_TOP_AHB_CLK 62
+#define GCC_CAMSS_TOP_AHB_CLK_SRC 63
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 64
+#define GCC_CPUSS_AHB_CLK 65
+#define GCC_CPUSS_AHB_CLK_SRC 66
+#define GCC_CPUSS_AHB_POSTDIV_CLK_SRC 67
+#define GCC_CPUSS_GNOC_CLK 68
+#define GCC_CPUSS_THROTTLE_CORE_CLK 69
+#define GCC_CPUSS_THROTTLE_XO_CLK 70
+#define GCC_DISP_AHB_CLK 71
+#define GCC_DISP_GPLL0_CLK_SRC 72
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 73
+#define GCC_DISP_HF_AXI_CLK 74
+#define GCC_DISP_THROTTLE_CORE_CLK 75
+#define GCC_DISP_XO_CLK 76
+#define GCC_GP1_CLK 77
+#define GCC_GP1_CLK_SRC 78
+#define GCC_GP2_CLK 79
+#define GCC_GP2_CLK_SRC 80
+#define GCC_GP3_CLK 81
+#define GCC_GP3_CLK_SRC 82
+#define GCC_GPU_CFG_AHB_CLK 83
+#define GCC_GPU_GPLL0_CLK_SRC 84
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 85
+#define GCC_GPU_IREF_CLK 86
+#define GCC_GPU_MEMNOC_GFX_CLK 87
+#define GCC_GPU_SNOC_DVM_GFX_CLK 88
+#define GCC_GPU_THROTTLE_CORE_CLK 89
+#define GCC_GPU_THROTTLE_XO_CLK 90
+#define GCC_PDM2_CLK 91
+#define GCC_PDM2_CLK_SRC 92
+#define GCC_PDM_AHB_CLK 93
+#define GCC_PDM_XO4_CLK 94
+#define GCC_PWM0_XO512_CLK 95
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 96
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 97
+#define GCC_QMIP_CPUSS_CFG_AHB_CLK 98
+#define GCC_QMIP_DISP_AHB_CLK 99
+#define GCC_QMIP_GPU_CFG_AHB_CLK 100
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 101
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 102
+#define GCC_QUPV3_WRAP0_CORE_CLK 103
+#define GCC_QUPV3_WRAP0_S0_CLK 104
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 105
+#define GCC_QUPV3_WRAP0_S1_CLK 106
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 107
+#define GCC_QUPV3_WRAP0_S2_CLK 108
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 109
+#define GCC_QUPV3_WRAP0_S3_CLK 110
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 111
+#define GCC_QUPV3_WRAP0_S4_CLK 112
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 113
+#define GCC_QUPV3_WRAP0_S5_CLK 114
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 115
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 116
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 117
+#define GCC_SDCC1_AHB_CLK 118
+#define GCC_SDCC1_APPS_CLK 119
+#define GCC_SDCC1_APPS_CLK_SRC 120
+#define GCC_SDCC1_ICE_CORE_CLK 121
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 122
+#define GCC_SDCC2_AHB_CLK 123
+#define GCC_SDCC2_APPS_CLK 124
+#define GCC_SDCC2_APPS_CLK_SRC 125
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 126
+#define GCC_SYS_NOC_USB3_PRIM_AXI_CLK 127
+#define GCC_USB30_PRIM_MASTER_CLK 128
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 129
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 130
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 131
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV 132
+#define GCC_USB30_PRIM_SLEEP_CLK 133
+#define GCC_USB3_PRIM_CLKREF_CLK 134
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 135
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 136
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 137
+#define GCC_VCODEC0_AXI_CLK 138
+#define GCC_VENUS_AHB_CLK 139
+#define GCC_VENUS_CTL_AXI_CLK 140
+#define GCC_VIDEO_AHB_CLK 141
+#define GCC_VIDEO_AXI0_CLK 142
+#define GCC_VIDEO_THROTTLE_CORE_CLK 143
+#define GCC_VIDEO_VCODEC0_SYS_CLK 144
+#define GCC_VIDEO_VENUS_CLK_SRC 145
+#define GCC_VIDEO_VENUS_CTL_CLK 146
+#define GCC_VIDEO_XO_CLK 147
+
+/* GCC resets */
+#define GCC_CAMSS_OPE_BCR 0
+#define GCC_CAMSS_TFE_BCR 1
+#define GCC_CAMSS_TOP_BCR 2
+#define GCC_GPU_BCR 3
+#define GCC_MMSS_BCR 4
+#define GCC_PDM_BCR 5
+#define GCC_QUPV3_WRAPPER_0_BCR 6
+#define GCC_SDCC1_BCR 7
+#define GCC_SDCC2_BCR 8
+#define GCC_USB30_PRIM_BCR 9
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 10
+#define GCC_VCODEC0_BCR 11
+#define GCC_VENUS_BCR 12
+#define GCC_VIDEO_INTERFACE_BCR 13
+#define GCC_QUSB2PHY_PRIM_BCR 14
+#define GCC_USB3_PHY_PRIM_SP0_BCR 15
+#define GCC_USB3PHY_PHY_PRIM_SP0_BCR 16
+
+/* Indexes for GDSCs */
+#define GCC_CAMSS_TOP_GDSC 0
+#define GCC_USB30_PRIM_GDSC 1
+#define GCC_VCODEC0_GDSC 2
+#define GCC_VENUS_GDSC 3
+#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC 4
+#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC 5
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_RT_GDSC 6
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_NRT_GDSC 7
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,lpass-sc7280.h b/include/dt-bindings/clock/qcom,lpass-sc7280.h
new file mode 100644
index 000000000000..e71ccac3a375
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,lpass-sc7280.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_LPASS_SC7280_H
+#define _DT_BINDINGS_CLK_QCOM_LPASS_SC7280_H
+
+#define LPASS_Q6SS_AHBM_CLK 0
+#define LPASS_Q6SS_AHBS_CLK 1
+#define LPASS_TOP_CC_LPI_Q6_AXIM_HS_CLK 2
+#define LPASS_QDSP6SS_XO_CLK 3
+#define LPASS_QDSP6SS_SLEEP_CLK 4
+#define LPASS_QDSP6SS_CORE_CLK 5
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h
index aa834d516234..fb624ff39273 100644
--- a/include/dt-bindings/clock/qcom,rpmcc.h
+++ b/include/dt-bindings/clock/qcom,rpmcc.h
@@ -159,5 +159,11 @@
#define RPM_SMD_SNOC_PERIPH_A_CLK 113
#define RPM_SMD_SNOC_LPASS_CLK 114
#define RPM_SMD_SNOC_LPASS_A_CLK 115
+#define RPM_SMD_HWKM_CLK 116
+#define RPM_SMD_HWKM_A_CLK 117
+#define RPM_SMD_PKA_CLK 118
+#define RPM_SMD_PKA_A_CLK 119
+#define RPM_SMD_CPUSS_GNOC_CLK 120
+#define RPM_SMD_CPUSS_GNOC_A_CLK 121
#endif
diff --git a/include/dt-bindings/leds/common.h b/include/dt-bindings/leds/common.h
index 52b619d44ba2..3be89a7c20a9 100644
--- a/include/dt-bindings/leds/common.h
+++ b/include/dt-bindings/leds/common.h
@@ -60,6 +60,13 @@
#define LED_FUNCTION_MICMUTE "micmute"
#define LED_FUNCTION_MUTE "mute"
+/* Used for player LEDs as found on game controllers from e.g. Nintendo, Sony. */
+#define LED_FUNCTION_PLAYER1 "player-1"
+#define LED_FUNCTION_PLAYER2 "player-2"
+#define LED_FUNCTION_PLAYER3 "player-3"
+#define LED_FUNCTION_PLAYER4 "player-4"
+#define LED_FUNCTION_PLAYER5 "player-5"
+
/* Miscellaneous functions. Use functions above if you can. */
#define LED_FUNCTION_ACTIVITY "activity"
#define LED_FUNCTION_ALARM "alarm"
diff --git a/include/dt-bindings/phy/phy-cadence.h b/include/dt-bindings/phy/phy-cadence.h
index 4652bcb86265..24fdc9e11bd6 100644
--- a/include/dt-bindings/phy/phy-cadence.h
+++ b/include/dt-bindings/phy/phy-cadence.h
@@ -12,6 +12,8 @@
#define TORRENT_SERDES_INTERNAL_SSC 2
#define CDNS_TORRENT_REFCLK_DRIVER 0
+#define CDNS_TORRENT_DERIVED_REFCLK 1
+#define CDNS_TORRENT_RECEIVED_REFCLK 2
/* Sierra */
#define CDNS_SIERRA_PLL_CMNLC 0
diff --git a/include/dt-bindings/pinctrl/mt65xx.h b/include/dt-bindings/pinctrl/mt65xx.h
index 7e16e58fe1f7..f5934abcd1bd 100644
--- a/include/dt-bindings/pinctrl/mt65xx.h
+++ b/include/dt-bindings/pinctrl/mt65xx.h
@@ -16,6 +16,15 @@
#define MTK_PUPD_SET_R1R0_10 102
#define MTK_PUPD_SET_R1R0_11 103
+#define MTK_PULL_SET_RSEL_000 200
+#define MTK_PULL_SET_RSEL_001 201
+#define MTK_PULL_SET_RSEL_010 202
+#define MTK_PULL_SET_RSEL_011 203
+#define MTK_PULL_SET_RSEL_100 204
+#define MTK_PULL_SET_RSEL_101 205
+#define MTK_PULL_SET_RSEL_110 206
+#define MTK_PULL_SET_RSEL_111 207
+
#define MTK_DRIVE_2mA 2
#define MTK_DRIVE_4mA 4
#define MTK_DRIVE_6mA 6
diff --git a/include/dt-bindings/power/imx8mm-power.h b/include/dt-bindings/power/imx8mm-power.h
index fc9c2e16aadc..648938f24c8e 100644
--- a/include/dt-bindings/power/imx8mm-power.h
+++ b/include/dt-bindings/power/imx8mm-power.h
@@ -19,4 +19,13 @@
#define IMX8MM_POWER_DOMAIN_DISPMIX 10
#define IMX8MM_POWER_DOMAIN_MIPI 11
+#define IMX8MM_VPUBLK_PD_G1 0
+#define IMX8MM_VPUBLK_PD_G2 1
+#define IMX8MM_VPUBLK_PD_H1 2
+
+#define IMX8MM_DISPBLK_PD_CSI_BRIDGE 0
+#define IMX8MM_DISPBLK_PD_LCDIF 1
+#define IMX8MM_DISPBLK_PD_MIPI_DSI 2
+#define IMX8MM_DISPBLK_PD_MIPI_CSI 3
+
#endif
diff --git a/include/dt-bindings/power/qcom-aoss-qmp.h b/include/dt-bindings/power/qcom-aoss-qmp.h
deleted file mode 100644
index ec336d31dee4..000000000000
--- a/include/dt-bindings/power/qcom-aoss-qmp.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018, Linaro Ltd. */
-
-#ifndef __DT_BINDINGS_POWER_QCOM_AOSS_QMP_H
-#define __DT_BINDINGS_POWER_QCOM_AOSS_QMP_H
-
-#define AOSS_QMP_LS_CDSP 0
-#define AOSS_QMP_LS_LPASS 1
-#define AOSS_QMP_LS_MODEM 2
-#define AOSS_QMP_LS_SLPI 3
-#define AOSS_QMP_LS_SPSS 4
-#define AOSS_QMP_LS_VENUS 5
-
-#endif
diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h
index 4533dbbf9937..960f7976a807 100644
--- a/include/dt-bindings/power/qcom-rpmpd.h
+++ b/include/dt-bindings/power/qcom-rpmpd.h
@@ -20,6 +20,14 @@
#define SDX55_MX 1
#define SDX55_CX 2
+/* SM6350 Power Domain Indexes */
+#define SM6350_CX 0
+#define SM6350_GFX 1
+#define SM6350_LCX 2
+#define SM6350_LMX 3
+#define SM6350_MSS 4
+#define SM6350_MX 5
+
/* SM8150 Power Domain Indexes */
#define SM8150_MSS 0
#define SM8150_EBI 1
@@ -133,6 +141,15 @@
#define MSM8916_VDDMX 3
#define MSM8916_VDDMX_AO 4
+/* MSM8953 Power Domain Indexes */
+#define MSM8953_VDDMD 0
+#define MSM8953_VDDMD_AO 1
+#define MSM8953_VDDCX 2
+#define MSM8953_VDDCX_AO 3
+#define MSM8953_VDDCX_VFL 4
+#define MSM8953_VDDMX 5
+#define MSM8953_VDDMX_AO 6
+
/* MSM8976 Power Domain Indexes */
#define MSM8976_VDDCX 0
#define MSM8976_VDDCX_AO 1
diff --git a/include/dt-bindings/reset/imx8ulp-pcc-reset.h b/include/dt-bindings/reset/imx8ulp-pcc-reset.h
new file mode 100644
index 000000000000..e99a4735c3c4
--- /dev/null
+++ b/include/dt-bindings/reset/imx8ulp-pcc-reset.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2021 NXP
+ */
+
+#ifndef DT_BINDING_PCC_RESET_IMX8ULP_H
+#define DT_BINDING_PCC_RESET_IMX8ULP_H
+
+/* PCC3 */
+#define PCC3_WDOG3_SWRST 0
+#define PCC3_WDOG4_SWRST 1
+#define PCC3_LPIT1_SWRST 2
+#define PCC3_TPM4_SWRST 3
+#define PCC3_TPM5_SWRST 4
+#define PCC3_FLEXIO1_SWRST 5
+#define PCC3_I3C2_SWRST 6
+#define PCC3_LPI2C4_SWRST 7
+#define PCC3_LPI2C5_SWRST 8
+#define PCC3_LPUART4_SWRST 9
+#define PCC3_LPUART5_SWRST 10
+#define PCC3_LPSPI4_SWRST 11
+#define PCC3_LPSPI5_SWRST 12
+
+/* PCC4 */
+#define PCC4_FLEXSPI2_SWRST 0
+#define PCC4_TPM6_SWRST 1
+#define PCC4_TPM7_SWRST 2
+#define PCC4_LPI2C6_SWRST 3
+#define PCC4_LPI2C7_SWRST 4
+#define PCC4_LPUART6_SWRST 5
+#define PCC4_LPUART7_SWRST 6
+#define PCC4_SAI4_SWRST 7
+#define PCC4_SAI5_SWRST 8
+#define PCC4_USDHC0_SWRST 9
+#define PCC4_USDHC1_SWRST 10
+#define PCC4_USDHC2_SWRST 11
+#define PCC4_USB0_SWRST 12
+#define PCC4_USB0_PHY_SWRST 13
+#define PCC4_USB1_SWRST 14
+#define PCC4_USB1_PHY_SWRST 15
+#define PCC4_ENET_SWRST 16
+
+/* PCC5 */
+#define PCC5_TPM8_SWRST 0
+#define PCC5_SAI6_SWRST 1
+#define PCC5_SAI7_SWRST 2
+#define PCC5_SPDIF_SWRST 3
+#define PCC5_ISI_SWRST 4
+#define PCC5_CSI_REGS_SWRST 5
+#define PCC5_CSI_SWRST 6
+#define PCC5_DSI_SWRST 7
+#define PCC5_WDOG5_SWRST 8
+#define PCC5_EPDC_SWRST 9
+#define PCC5_PXP_SWRST 10
+#define PCC5_GPU2D_SWRST 11
+#define PCC5_GPU3D_SWRST 12
+#define PCC5_DC_NANO_SWRST 13
+
+#endif /* DT_BINDING_PCC_RESET_IMX8ULP_H */
diff --git a/include/dt-bindings/reset-controller/mt2712-resets.h b/include/dt-bindings/reset/mt2712-resets.h
index 9e7ee762f076..9e7ee762f076 100644
--- a/include/dt-bindings/reset-controller/mt2712-resets.h
+++ b/include/dt-bindings/reset/mt2712-resets.h
diff --git a/include/dt-bindings/reset/mt8173-resets.h b/include/dt-bindings/reset/mt8173-resets.h
index ba8636eda5ae..6a60c7cecc4c 100644
--- a/include/dt-bindings/reset/mt8173-resets.h
+++ b/include/dt-bindings/reset/mt8173-resets.h
@@ -27,6 +27,8 @@
#define MT8173_INFRA_GCE_FAXI_RST 40
#define MT8173_INFRA_MMIOMMURST 47
+/* MMSYS resets */
+#define MT8173_MMSYS_SW0_RST_B_DISP_DSI0 25
/* PERICFG resets */
#define MT8173_PERI_UART0_SW_RST 0
diff --git a/include/dt-bindings/reset-controller/mt8183-resets.h b/include/dt-bindings/reset/mt8183-resets.h
index a1bbd41e0d12..48c5d2de0a38 100644
--- a/include/dt-bindings/reset-controller/mt8183-resets.h
+++ b/include/dt-bindings/reset/mt8183-resets.h
@@ -80,6 +80,9 @@
#define MT8183_INFRACFG_SW_RST_NUM 128
+/* MMSYS resets */
+#define MT8183_MMSYS_SW0_RST_B_DISP_DSI0 25
+
#define MT8183_TOPRGU_MM_SW_RST 1
#define MT8183_TOPRGU_MFG_SW_RST 2
#define MT8183_TOPRGU_VENC_SW_RST 3
diff --git a/include/dt-bindings/reset-controller/mt8192-resets.h b/include/dt-bindings/reset/mt8192-resets.h
index be9a7ca245b9..be9a7ca245b9 100644
--- a/include/dt-bindings/reset-controller/mt8192-resets.h
+++ b/include/dt-bindings/reset/mt8192-resets.h
diff --git a/include/dt-bindings/reset/stericsson,db8500-prcc-reset.h b/include/dt-bindings/reset/stericsson,db8500-prcc-reset.h
new file mode 100644
index 000000000000..ea906896c70f
--- /dev/null
+++ b/include/dt-bindings/reset/stericsson,db8500-prcc-reset.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DT_BINDINGS_STE_PRCC_RESET
+#define _DT_BINDINGS_STE_PRCC_RESET
+
+#define DB8500_PRCC_1 1
+#define DB8500_PRCC_2 2
+#define DB8500_PRCC_3 3
+#define DB8500_PRCC_6 6
+
+/* Reset lines on PRCC 1 */
+#define DB8500_PRCC_1_RESET_UART0 0
+#define DB8500_PRCC_1_RESET_UART1 1
+#define DB8500_PRCC_1_RESET_I2C1 2
+#define DB8500_PRCC_1_RESET_MSP0 3
+#define DB8500_PRCC_1_RESET_MSP1 4
+#define DB8500_PRCC_1_RESET_SDI0 5
+#define DB8500_PRCC_1_RESET_I2C2 6
+#define DB8500_PRCC_1_RESET_SPI3 7
+#define DB8500_PRCC_1_RESET_SLIMBUS0 8
+#define DB8500_PRCC_1_RESET_I2C4 9
+#define DB8500_PRCC_1_RESET_MSP3 10
+#define DB8500_PRCC_1_RESET_PER_MSP3 11
+#define DB8500_PRCC_1_RESET_PER_MSP1 12
+#define DB8500_PRCC_1_RESET_PER_MSP0 13
+#define DB8500_PRCC_1_RESET_PER_SLIMBUS 14
+
+/* Reset lines on PRCC 2 */
+#define DB8500_PRCC_2_RESET_I2C3 0
+#define DB8500_PRCC_2_RESET_PWL 1
+#define DB8500_PRCC_2_RESET_SDI4 2
+#define DB8500_PRCC_2_RESET_MSP2 3
+#define DB8500_PRCC_2_RESET_SDI1 4
+#define DB8500_PRCC_2_RESET_SDI3 5
+#define DB8500_PRCC_2_RESET_HSIRX 6
+#define DB8500_PRCC_2_RESET_HSITX 7
+#define DB8500_PRCC_2_RESET_PER_MSP2 8
+
+/* Reset lines on PRCC 3 */
+#define DB8500_PRCC_3_RESET_SSP0 1
+#define DB8500_PRCC_3_RESET_SSP1 2
+#define DB8500_PRCC_3_RESET_I2C0 3
+#define DB8500_PRCC_3_RESET_SDI2 4
+#define DB8500_PRCC_3_RESET_SKE 5
+#define DB8500_PRCC_3_RESET_UART2 6
+#define DB8500_PRCC_3_RESET_SDI5 7
+
+/* Reset lines on PRCC 6 */
+#define DB8500_PRCC_6_RESET_RNG 0
+
+#endif
diff --git a/include/kunit/test.h b/include/kunit/test.h
index 018e776a34b9..b26400731c02 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -11,11 +11,20 @@
#include <kunit/assert.h>
#include <kunit/try-catch.h>
-#include <linux/kernel.h>
+
+#include <linux/container_of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kconfig.h>
+#include <linux/kref.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#include <linux/types.h>
-#include <linux/kref.h>
+
+#include <asm/rwonce.h>
struct kunit_resource;
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 974d497a897d..143ce7e0bee1 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -577,7 +577,6 @@ extern u32 osc_sb_native_usb4_control;
#define OSC_PCI_MSI_SUPPORT 0x00000010
#define OSC_PCI_EDR_SUPPORT 0x00000080
#define OSC_PCI_HPX_TYPE_3_SUPPORT 0x00000100
-#define OSC_PCI_SUPPORT_MASKS 0x0000019f
/* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */
#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 0x00000001
@@ -587,7 +586,6 @@ extern u32 osc_sb_native_usb4_control;
#define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010
#define OSC_PCI_EXPRESS_LTR_CONTROL 0x00000020
#define OSC_PCI_EXPRESS_DPC_CONTROL 0x00000080
-#define OSC_PCI_CONTROL_MASKS 0x000000bf
#define ACPI_GSB_ACCESS_ATTRIB_QUICK 0x00000002
#define ACPI_GSB_ACCESS_ATTRIB_SEND_RCV 0x00000004
@@ -1016,6 +1014,7 @@ int acpi_subsys_runtime_suspend(struct device *dev);
int acpi_subsys_runtime_resume(struct device *dev);
int acpi_dev_pm_attach(struct device *dev, bool power_on);
bool acpi_storage_d3(struct device *dev);
+bool acpi_dev_state_d0(struct device *dev);
#else
static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; }
@@ -1027,6 +1026,10 @@ static inline bool acpi_storage_d3(struct device *dev)
{
return false;
}
+static inline bool acpi_dev_state_d0(struct device *dev)
+{
+ return true;
+}
#endif
#if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP)
@@ -1353,6 +1356,7 @@ static inline int lpit_read_residency_count_address(u64 *address)
#ifdef CONFIG_ACPI_PPTT
int acpi_pptt_cpu_is_thread(unsigned int cpu);
int find_acpi_cpu_topology(unsigned int cpu, int level);
+int find_acpi_cpu_topology_cluster(unsigned int cpu);
int find_acpi_cpu_topology_package(unsigned int cpu);
int find_acpi_cpu_topology_hetero_id(unsigned int cpu);
int find_acpi_cpu_cache_topology(unsigned int cpu, int level);
@@ -1365,6 +1369,10 @@ static inline int find_acpi_cpu_topology(unsigned int cpu, int level)
{
return -EINVAL;
}
+static inline int find_acpi_cpu_topology_cluster(unsigned int cpu)
+{
+ return -EINVAL;
+}
static inline int find_acpi_cpu_topology_package(unsigned int cpu)
{
return -EINVAL;
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index c68d87b87283..edfcf7a14dcd 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -122,24 +122,6 @@ struct amba_device *amba_device_alloc(const char *, resource_size_t, size_t);
void amba_device_put(struct amba_device *);
int amba_device_add(struct amba_device *, struct resource *);
int amba_device_register(struct amba_device *, struct resource *);
-struct amba_device *amba_apb_device_add(struct device *parent, const char *name,
- resource_size_t base, size_t size,
- int irq1, int irq2, void *pdata,
- unsigned int periphid);
-struct amba_device *amba_ahb_device_add(struct device *parent, const char *name,
- resource_size_t base, size_t size,
- int irq1, int irq2, void *pdata,
- unsigned int periphid);
-struct amba_device *
-amba_apb_device_add_res(struct device *parent, const char *name,
- resource_size_t base, size_t size, int irq1,
- int irq2, void *pdata, unsigned int periphid,
- struct resource *resbase);
-struct amba_device *
-amba_ahb_device_add_res(struct device *parent, const char *name,
- resource_size_t base, size_t size, int irq1,
- int irq2, void *pdata, unsigned int periphid,
- struct resource *resbase);
void amba_device_unregister(struct amba_device *);
struct amba_device *amba_find_device(const char *, struct device *, unsigned int, unsigned int);
int amba_request_regions(struct amba_device *, const char *);
diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h
index 71881a2b6f78..5deaddbd7927 100644
--- a/include/linux/anon_inodes.h
+++ b/include/linux/anon_inodes.h
@@ -15,6 +15,10 @@ struct inode;
struct file *anon_inode_getfile(const char *name,
const struct file_operations *fops,
void *priv, int flags);
+struct file *anon_inode_getfile_secure(const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags,
+ const struct inode *context_inode);
int anon_inode_getfd(const char *name, const struct file_operations *fops,
void *priv, int flags);
int anon_inode_getfd_secure(const char *name,
diff --git a/include/linux/apple-mailbox.h b/include/linux/apple-mailbox.h
new file mode 100644
index 000000000000..720fbb70294a
--- /dev/null
+++ b/include/linux/apple-mailbox.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Apple mailbox message format
+ *
+ * Copyright (C) 2021 The Asahi Linux Contributors
+ */
+
+#ifndef _LINUX_APPLE_MAILBOX_H_
+#define _LINUX_APPLE_MAILBOX_H_
+
+#include <linux/types.h>
+
+/* encodes a single 96-bit message sent over the single channel */
+struct apple_mbox_msg {
+ u64 msg0;
+ u32 msg1;
+};
+
+#endif
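
For orientation (not part of the patch), a mailbox client would pack the 96 bits as follows; mbox_send_message() is the standard mailbox-framework entry point, and everything else here is a hypothetical sketch:

#include <linux/apple-mailbox.h>
#include <linux/mailbox_client.h>

/* Hypothetical sketch: compose a 96-bit message and hand it to the
 * mailbox framework. Only struct apple_mbox_msg comes from this patch. */
static int example_send(struct mbox_chan *chan, u64 payload, u32 extra)
{
	struct apple_mbox_msg msg = {
		.msg0 = payload,	/* low 64 bits of the message */
		.msg1 = extra,		/* remaining 32 bits */
	};

	return mbox_send_message(chan, &msg);
}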
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index f180240dc95f..b97cea83b25e 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -62,10 +62,12 @@ void topology_set_thermal_pressure(const struct cpumask *cpus,
struct cpu_topology {
int thread_id;
int core_id;
+ int cluster_id;
int package_id;
int llc_id;
cpumask_t thread_sibling;
cpumask_t core_sibling;
+ cpumask_t cluster_sibling;
cpumask_t llc_sibling;
};
@@ -73,13 +75,16 @@ struct cpu_topology {
extern struct cpu_topology cpu_topology[NR_CPUS];
#define topology_physical_package_id(cpu) (cpu_topology[cpu].package_id)
+#define topology_cluster_id(cpu) (cpu_topology[cpu].cluster_id)
#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
+#define topology_cluster_cpumask(cpu) (&cpu_topology[cpu].cluster_sibling)
#define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling)
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
+const struct cpumask *cpu_clustergroup_mask(int cpu);
void update_siblings_masks(unsigned int cpu);
void remove_cpu_topology(unsigned int cpuid);
void reset_cpu_topology(void);
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
index 505c679b6a9b..85651e41ded8 100644
--- a/include/linux/arm_ffa.h
+++ b/include/linux/arm_ffa.h
@@ -262,6 +262,8 @@ struct ffa_dev_ops {
int (*memory_reclaim)(u64 g_handle, u32 flags);
int (*memory_share)(struct ffa_device *dev,
struct ffa_mem_ops_args *args);
+ int (*memory_lend)(struct ffa_device *dev,
+ struct ffa_mem_ops_args *args);
};
#endif /* _LINUX_ARM_FFA_H */
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 1b44f40c7700..199e47e97d64 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -329,6 +329,7 @@ enum {
ATA_LOG_SECURITY = 0x06,
ATA_LOG_SATA_SETTINGS = 0x08,
ATA_LOG_ZONED_INFORMATION = 0x09,
+ ATA_LOG_CONCURRENT_POSITIONING_RANGES = 0x47,
/* Identify device SATA settings log:*/
ATA_LOG_DEVSLP_OFFSET = 0x30,
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 82b7c1116a85..d06134ac6245 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -11,6 +11,7 @@
#include <linux/sched.h>
#include <linux/ptrace.h>
+#include <linux/audit_arch.h>
#include <uapi/linux/audit.h>
#include <uapi/linux/netfilter/nf_tables.h>
@@ -286,7 +287,10 @@ static inline int audit_signal_info(int sig, struct task_struct *t)
/* These are defined in auditsc.c */
/* Public API */
extern int audit_alloc(struct task_struct *task);
+extern int audit_alloc_kernel(struct task_struct *task);
extern void __audit_free(struct task_struct *task);
+extern void __audit_uring_entry(u8 op);
+extern void __audit_uring_exit(int success, long code);
extern void __audit_syscall_entry(int major, unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3);
extern void __audit_syscall_exit(int ret_success, long ret_value);
@@ -323,6 +327,21 @@ static inline void audit_free(struct task_struct *task)
if (unlikely(task->audit_context))
__audit_free(task);
}
+static inline void audit_uring_entry(u8 op)
+{
+ /*
+ * We intentionally check audit_context() before audit_enabled as most
+	 * Linux systems (as of ~2021) rely on systemd, which forces audit to
+ * be enabled regardless of the user's audit configuration.
+ */
+ if (unlikely(audit_context() && audit_enabled))
+ __audit_uring_entry(op);
+}
+static inline void audit_uring_exit(int success, long code)
+{
+ if (unlikely(!audit_dummy_context()))
+ __audit_uring_exit(success, code);
+}
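
A hedged caller-side sketch of the new hooks (the real call sites live in io_uring, not in this header; the names below are illustrative):

/* Hypothetical sketch of the intended call pattern around an io_uring op. */
static int example_issue(u8 opcode)
{
	int ret;

	audit_uring_entry(opcode);		/* start an audit record */
	ret = 0; /* ... perform the io_uring operation ... */
	audit_uring_exit(ret >= 0, ret);	/* success flag + result code */
	return ret;
}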
static inline void audit_syscall_entry(int major, unsigned long a0,
unsigned long a1, unsigned long a2,
unsigned long a3)
@@ -398,6 +417,7 @@ extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
const struct cred *old);
extern void __audit_log_capset(const struct cred *new, const struct cred *old);
extern void __audit_mmap_fd(int fd, int flags);
+extern void __audit_openat2_how(struct open_how *how);
extern void __audit_log_kern_module(char *name);
extern void __audit_fanotify(unsigned int response);
extern void __audit_tk_injoffset(struct timespec64 offset);
@@ -494,6 +514,12 @@ static inline void audit_mmap_fd(int fd, int flags)
__audit_mmap_fd(fd, flags);
}
+static inline void audit_openat2_how(struct open_how *how)
+{
+ if (unlikely(!audit_dummy_context()))
+ __audit_openat2_how(how);
+}
+
static inline void audit_log_kern_module(char *name)
{
if (!audit_dummy_context())
@@ -554,8 +580,16 @@ static inline int audit_alloc(struct task_struct *task)
{
return 0;
}
+static inline int audit_alloc_kernel(struct task_struct *task)
+{
+ return 0;
+}
static inline void audit_free(struct task_struct *task)
{ }
+static inline void audit_uring_entry(u8 op)
+{ }
+static inline void audit_uring_exit(int success, long code)
+{ }
static inline void audit_syscall_entry(int major, unsigned long a0,
unsigned long a1, unsigned long a2,
unsigned long a3)
@@ -645,6 +679,9 @@ static inline void audit_log_capset(const struct cred *new,
static inline void audit_mmap_fd(int fd, int flags)
{ }
+static inline void audit_openat2_how(struct open_how *how)
+{ }
+
static inline void audit_log_kern_module(char *name)
{
}
diff --git a/include/linux/audit_arch.h b/include/linux/audit_arch.h
new file mode 100644
index 000000000000..8fdb1afe251a
--- /dev/null
+++ b/include/linux/audit_arch.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* audit_arch.h -- Arch layer specific support for audit
+ *
+ * Copyright 2021 Red Hat Inc., Durham, North Carolina.
+ * All Rights Reserved.
+ *
+ * Author: Richard Guy Briggs <rgb@redhat.com>
+ */
+#ifndef _LINUX_AUDIT_ARCH_H_
+#define _LINUX_AUDIT_ARCH_H_
+
+enum auditsc_class_t {
+ AUDITSC_NATIVE = 0,
+ AUDITSC_COMPAT,
+ AUDITSC_OPEN,
+ AUDITSC_OPENAT,
+ AUDITSC_SOCKETCALL,
+ AUDITSC_EXECVE,
+ AUDITSC_OPENAT2,
+
+ AUDITSC_NVALS /* count */
+};
+
+#endif
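
These class indexes are meant to be returned by each architecture's syscall classifier. A minimal illustrative sketch, assuming x86_64 syscall numbers (the function name and body are hypothetical):

#include <linux/audit_arch.h>

/* Illustrative classifier: map a syscall number to an auditsc class. */
static int example_classify_syscall(unsigned int syscall)
{
	switch (syscall) {
	case 2 /* __NR_open on x86_64 */:
		return AUDITSC_OPEN;
	case 257 /* __NR_openat on x86_64 */:
		return AUDITSC_OPENAT;
	case 437 /* __NR_openat2 */:
		return AUDITSC_OPENAT2;
	default:
		return AUDITSC_NATIVE;
	}
}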
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index db0e099c2399..b30a1bc74fc7 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -238,27 +238,26 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
* VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
* TX/RX Checksum offloading and TSO for non-tunnelled packets.
*/
-#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001
-#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
-#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004
-#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
-#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
-#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
-#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040
-#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
-#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
-#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
-#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
-#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
-#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
-#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
-#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000
-#define VIRTCHNL_VF_OFFLOAD_USO 0X02000000
-#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF 0X08000000
-#define VIRTCHNL_VF_OFFLOAD_FDIR_PF 0X10000000
-
-/* Define below the capability flags that are not offloads */
-#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080
+#define VIRTCHNL_VF_OFFLOAD_L2 BIT(0)
+#define VIRTCHNL_VF_OFFLOAD_IWARP BIT(1)
+#define VIRTCHNL_VF_OFFLOAD_RSS_AQ BIT(3)
+#define VIRTCHNL_VF_OFFLOAD_RSS_REG BIT(4)
+#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR BIT(5)
+#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES BIT(6)
+/* used to negotiate communication of link speeds in Mbps */
+#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED BIT(7)
+#define VIRTCHNL_VF_OFFLOAD_VLAN BIT(16)
+#define VIRTCHNL_VF_OFFLOAD_RX_POLLING BIT(17)
+#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 BIT(18)
+#define VIRTCHNL_VF_OFFLOAD_RSS_PF BIT(19)
+#define VIRTCHNL_VF_OFFLOAD_ENCAP BIT(20)
+#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM BIT(21)
+#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM BIT(22)
+#define VIRTCHNL_VF_OFFLOAD_ADQ BIT(23)
+#define VIRTCHNL_VF_OFFLOAD_USO BIT(25)
+#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF BIT(27)
+#define VIRTCHNL_VF_OFFLOAD_FDIR_PF BIT(28)
+
#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
VIRTCHNL_VF_OFFLOAD_VLAN | \
VIRTCHNL_VF_OFFLOAD_RSS_PF)
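
This hunk only re-expresses the flag values with BIT(), so each new definition must equal its old hex constant. A compile-time spot check (illustrative, not part of the patch):

#include <linux/bits.h>
#include <linux/build_bug.h>

/* BIT(n) is (1UL << (n)), so the converted flags keep their old values. */
static_assert(VIRTCHNL_VF_CAP_ADV_LINK_SPEED == 0x00000080);
static_assert(VIRTCHNL_VF_OFFLOAD_VLAN == 0x00010000);
static_assert(VIRTCHNL_VF_OFFLOAD_FDIR_PF == 0x10000000);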
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 33207004cfde..993c5628a726 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -103,6 +103,9 @@ struct wb_completion {
* change as blkcg is disabled and enabled higher up in the hierarchy, a wb
* is tested for blkcg after lookup and removed from index on mismatch so
* that a new wb for the combination can be created.
+ *
+ * Each bdi_writeback that is not embedded into the backing_dev_info must hold
+ * a reference to the parent backing_dev_info. See cgwb_create() for details.
*/
struct bdi_writeback {
struct backing_dev_info *bdi; /* our parent bdi */
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index ac7f231b8825..483979c1b9f4 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -12,13 +12,13 @@
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
-#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/writeback.h>
-#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>
+struct blkcg;
+
static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
kref_get(&bdi->refcnt);
@@ -64,7 +64,7 @@ static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
return atomic_long_read(&bdi->tot_write_bandwidth);
}
-static inline void __add_wb_stat(struct bdi_writeback *wb,
+static inline void wb_stat_mod(struct bdi_writeback *wb,
enum wb_stat_item item, s64 amount)
{
percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
@@ -72,12 +72,12 @@ static inline void __add_wb_stat(struct bdi_writeback *wb,
static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
- __add_wb_stat(wb, item, 1);
+ wb_stat_mod(wb, item, 1);
}
static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
- __add_wb_stat(wb, item, -1);
+ wb_stat_mod(wb, item, -1);
}
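
The rename from __add_wb_stat() to wb_stat_mod() signals that callers may adjust a counter by any amount, not just by one. A hypothetical caller:

/* Hypothetical: account several pages written back in one shot. */
static void example_account(struct bdi_writeback *wb, long nr_pages)
{
	wb_stat_mod(wb, WB_WRITEBACK, nr_pages);
}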
static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
@@ -133,20 +133,7 @@ static inline bool writeback_in_progress(struct bdi_writeback *wb)
return test_bit(WB_writeback_running, &wb->state);
}
-static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
-{
- struct super_block *sb;
-
- if (!inode)
- return &noop_backing_dev_info;
-
- sb = inode->i_sb;
-#ifdef CONFIG_BLOCK
- if (sb_is_blkdev_sb(sb))
- return I_BDEV(inode)->bd_disk->bdi;
-#endif
- return sb->s_bdi;
-}
+struct backing_dev_info *inode_to_bdi(struct inode *inode);
static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
@@ -154,7 +141,6 @@ static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
}
long congestion_wait(int sync, long timeout);
-long wait_iff_congested(int sync, long timeout);
static inline bool mapping_can_writeback(struct address_space *mapping)
{
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 00952e92eae1..fe6bdfbbef66 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -6,19 +6,10 @@
#define __LINUX_BIO_H
#include <linux/mempool.h>
-#include <linux/ioprio.h>
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
#include <linux/uio.h>
-#define BIO_DEBUG
-
-#ifdef BIO_DEBUG
-#define BIO_BUG_ON BUG_ON
-#else
-#define BIO_BUG_ON
-#endif
-
#define BIO_MAX_VECS 256U
static inline unsigned int bio_max_segs(unsigned int nr_segs)
@@ -78,22 +69,6 @@ static inline bool bio_no_advance_iter(const struct bio *bio)
bio_op(bio) == REQ_OP_WRITE_ZEROES;
}
-static inline bool bio_mergeable(struct bio *bio)
-{
- if (bio->bi_opf & REQ_NOMERGE_FLAGS)
- return false;
-
- return true;
-}
-
-static inline unsigned int bio_cur_bytes(struct bio *bio)
-{
- if (bio_has_data(bio))
- return bio_iovec(bio).bv_len;
- else /* dataless requests such as discard */
- return bio->bi_iter.bi_size;
-}
-
static inline void *bio_data(struct bio *bio)
{
if (bio_has_data(bio))
@@ -102,25 +77,6 @@ static inline void *bio_data(struct bio *bio)
return NULL;
}
-/**
- * bio_full - check if the bio is full
- * @bio: bio to check
- * @len: length of one segment to be added
- *
- * Return true if @bio is full and one segment with @len bytes can't be
- * added to the bio, otherwise return false
- */
-static inline bool bio_full(struct bio *bio, unsigned len)
-{
- if (bio->bi_vcnt >= bio->bi_max_vecs)
- return true;
-
- if (bio->bi_iter.bi_size > UINT_MAX - len)
- return true;
-
- return false;
-}
-
static inline bool bio_next_segment(const struct bio *bio,
struct bvec_iter_all *iter)
{
@@ -163,6 +119,28 @@ static inline void bio_advance_iter_single(const struct bio *bio,
bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
}
+void __bio_advance(struct bio *, unsigned bytes);
+
+/**
+ * bio_advance - increment/complete a bio by some number of bytes
+ * @bio: bio to advance
+ * @bytes: number of bytes to complete
+ *
+ * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
+ * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
+ * be updated on the last bvec as well.
+ *
+ * @bio will then represent the remaining, uncompleted portion of the io.
+ */
+static inline void bio_advance(struct bio *bio, unsigned int nbytes)
+{
+ if (nbytes == bio->bi_iter.bi_size) {
+ bio->bi_iter.bi_size = 0;
+ return;
+ }
+ __bio_advance(bio, nbytes);
+}
+
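
A short worked example of the two paths (hypothetical driver code): the first call takes the out-of-line path, the second consumes the bio exactly and only zeroes bi_size:

/* Hypothetical: advance a 4096-byte bio in two steps. */
static void example_partial(struct bio *bio)
{
	bio_advance(bio, 512);			/* out-of-line: walks the iter */
	bio_advance(bio, bio->bi_iter.bi_size);	/* inline fast path: size -> 0 */
}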
#define __bio_for_each_segment(bvl, bio, iter, start) \
for (iter = (start); \
(iter).bi_size && \
@@ -265,37 +243,6 @@ static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
bio->bi_flags &= ~(1U << bit);
}
-static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
-{
- *bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
-}
-
-static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
-{
- struct bvec_iter iter = bio->bi_iter;
- int idx;
-
- bio_get_first_bvec(bio, bv);
- if (bv->bv_len == bio->bi_iter.bi_size)
- return; /* this bio only has a single bvec */
-
- bio_advance_iter(bio, &iter, iter.bi_size);
-
- if (!iter.bi_bvec_done)
- idx = iter.bi_idx - 1;
- else /* in the middle of bvec */
- idx = iter.bi_idx;
-
- *bv = bio->bi_io_vec[idx];
-
- /*
- * iter.bi_bvec_done records actual length of the last bvec
- * if this bio ends in the middle of one io vector
- */
- if (iter.bi_bvec_done)
- bv->bv_len = iter.bi_bvec_done;
-}
-
static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
@@ -424,7 +371,7 @@ static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned short nr_iovecs)
return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set);
}
-extern blk_qc_t submit_bio(struct bio *);
+void submit_bio(struct bio *bio);
extern void bio_endio(struct bio *);
@@ -456,8 +403,6 @@ static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
struct request_queue;
extern int submit_bio_wait(struct bio *bio);
-extern void bio_advance(struct bio *, unsigned);
-
extern void bio_init(struct bio *bio, struct bio_vec *table,
unsigned short max_vecs);
extern void bio_uninit(struct bio *);
@@ -469,12 +414,11 @@ extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
unsigned int, unsigned int);
int bio_add_zone_append_page(struct bio *bio, struct page *page,
unsigned int len, unsigned int offset);
-bool __bio_try_merge_page(struct bio *bio, struct page *page,
- unsigned int len, unsigned int off, bool *same_page);
void __bio_add_page(struct bio *bio, struct page *page,
unsigned int len, unsigned int off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
-void bio_release_pages(struct bio *bio, bool mark_dirty);
+void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter);
+void __bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
@@ -482,27 +426,16 @@ extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);
-void bio_truncate(struct bio *bio, unsigned new_size);
void guard_bio_eod(struct bio *bio);
void zero_fill_bio(struct bio *bio);
-extern const char *bio_devname(struct bio *bio, char *buffer);
+static inline void bio_release_pages(struct bio *bio, bool mark_dirty)
+{
+ if (!bio_flagged(bio, BIO_NO_PAGE_REF))
+ __bio_release_pages(bio, mark_dirty);
+}
-#define bio_set_dev(bio, bdev) \
-do { \
- bio_clear_flag(bio, BIO_REMAPPED); \
- if ((bio)->bi_bdev != (bdev)) \
- bio_clear_flag(bio, BIO_THROTTLED); \
- (bio)->bi_bdev = (bdev); \
- bio_associate_blkg(bio); \
-} while (0)
-
-#define bio_copy_dev(dst, src) \
-do { \
- bio_clear_flag(dst, BIO_REMAPPED); \
- (dst)->bi_bdev = (src)->bi_bdev; \
- bio_clone_blkg_association(dst, src); \
-} while (0)
+extern const char *bio_devname(struct bio *bio, char *buffer);
#define bio_dev(bio) \
disk_devt((bio)->bi_bdev->bd_disk)
@@ -521,6 +454,22 @@ static inline void bio_clone_blkg_association(struct bio *dst,
struct bio *src) { }
#endif /* CONFIG_BLK_CGROUP */
+static inline void bio_set_dev(struct bio *bio, struct block_device *bdev)
+{
+ bio_clear_flag(bio, BIO_REMAPPED);
+ if (bio->bi_bdev != bdev)
+ bio_clear_flag(bio, BIO_THROTTLED);
+ bio->bi_bdev = bdev;
+ bio_associate_blkg(bio);
+}
+
+static inline void bio_copy_dev(struct bio *dst, struct bio *src)
+{
+ bio_clear_flag(dst, BIO_REMAPPED);
+ dst->bi_bdev = src->bi_bdev;
+ bio_clone_blkg_association(dst, src);
+}
+
/*
* BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
*
@@ -784,7 +733,7 @@ static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
*/
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
- bio->bi_opf |= REQ_HIPRI;
+ bio->bi_opf |= REQ_POLLED;
if (!is_sync_kiocb(kiocb))
bio->bi_opf |= REQ_NOWAIT;
}
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 37f36dad18bd..a241dcf50f39 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -123,6 +123,8 @@ struct device;
*/
unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags);
unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags);
+unsigned long *bitmap_alloc_node(unsigned int nbits, gfp_t flags, int node);
+unsigned long *bitmap_zalloc_node(unsigned int nbits, gfp_t flags, int node);
void bitmap_free(const unsigned long *bitmap);
/* Managed variants of the above. */
diff --git a/include/linux/blk-crypto-profile.h b/include/linux/blk-crypto-profile.h
new file mode 100644
index 000000000000..bbab65bd5428
--- /dev/null
+++ b/include/linux/blk-crypto-profile.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 Google LLC
+ */
+
+#ifndef __LINUX_BLK_CRYPTO_PROFILE_H
+#define __LINUX_BLK_CRYPTO_PROFILE_H
+
+#include <linux/bio.h>
+#include <linux/blk-crypto.h>
+
+struct blk_crypto_profile;
+
+/**
+ * struct blk_crypto_ll_ops - functions to control inline encryption hardware
+ *
+ * Low-level operations for controlling inline encryption hardware. This
+ * interface must be implemented by storage drivers that support inline
+ * encryption. All functions may sleep, are serialized by profile->lock, and
+ * are never called while profile->dev (if set) is runtime-suspended.
+ */
+struct blk_crypto_ll_ops {
+
+ /**
+ * @keyslot_program: Program a key into the inline encryption hardware.
+ *
+ * Program @key into the specified @slot in the inline encryption
+ * hardware, overwriting any key that the keyslot may already contain.
+ * The keyslot is guaranteed to not be in-use by any I/O.
+ *
+ * This is required if the device has keyslots. Otherwise (i.e. if the
+ * device is a layered device, or if the device is real hardware that
+ * simply doesn't have the concept of keyslots) it is never called.
+ *
+ * Must return 0 on success, or -errno on failure.
+ */
+ int (*keyslot_program)(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key,
+ unsigned int slot);
+
+ /**
+ * @keyslot_evict: Evict a key from the inline encryption hardware.
+ *
+ * If the device has keyslots, this function must evict the key from the
+	 * specified @slot.  The slot will contain @key, but the @key argument
+	 * should not be needed, as @slot alone identifies the key to evict.
+ * The keyslot is guaranteed to not be in-use by any I/O.
+ *
+ * If the device doesn't have keyslots itself, this function must evict
+ * @key from any underlying devices. @slot won't be valid in this case.
+ *
+ * If there are no keyslots and no underlying devices, this function
+ * isn't required.
+ *
+ * Must return 0 on success, or -errno on failure.
+ */
+ int (*keyslot_evict)(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key,
+ unsigned int slot);
+};
+
+/**
+ * struct blk_crypto_profile - inline encryption profile for a device
+ *
+ * This struct contains a storage device's inline encryption capabilities (e.g.
+ * the supported crypto algorithms), driver-provided functions to control the
+ * inline encryption hardware (e.g. programming and evicting keys), and optional
+ * device-independent keyslot management data.
+ */
+struct blk_crypto_profile {
+
+ /* public: Drivers must initialize the following fields. */
+
+ /**
+ * @ll_ops: Driver-provided functions to control the inline encryption
+ * hardware, e.g. program and evict keys.
+ */
+ struct blk_crypto_ll_ops ll_ops;
+
+ /**
+ * @max_dun_bytes_supported: The maximum number of bytes supported for
+ * specifying the data unit number (DUN). Specifically, the range of
+ * supported DUNs is 0 through (1 << (8 * max_dun_bytes_supported)) - 1.
+ */
+ unsigned int max_dun_bytes_supported;
+
+ /**
+ * @modes_supported: Array of bitmasks that specifies whether each
+ * combination of crypto mode and data unit size is supported.
+ * Specifically, the i'th bit of modes_supported[crypto_mode] is set if
+ * crypto_mode can be used with a data unit size of (1 << i). Note that
+ * only data unit sizes that are powers of 2 can be supported.
+ */
+ unsigned int modes_supported[BLK_ENCRYPTION_MODE_MAX];
+
+ /**
+ * @dev: An optional device for runtime power management. If the driver
+ * provides this device, it will be runtime-resumed before any function
+ * in @ll_ops is called and will remain resumed during the call.
+ */
+ struct device *dev;
+
+ /* private: The following fields shouldn't be accessed by drivers. */
+
+ /* Number of keyslots, or 0 if not applicable */
+ unsigned int num_slots;
+
+ /*
+ * Serializes all calls to functions in @ll_ops as well as all changes
+ * to @slot_hashtable. This can also be taken in read mode to look up
+ * keyslots while ensuring that they can't be changed concurrently.
+ */
+ struct rw_semaphore lock;
+
+ /* List of idle slots, with least recently used slot at front */
+ wait_queue_head_t idle_slots_wait_queue;
+ struct list_head idle_slots;
+ spinlock_t idle_slots_lock;
+
+ /*
+	 * Hash table which maps struct blk_crypto_key * to keyslots, so that we
+ * can find a key's keyslot in O(1) time rather than O(num_slots).
+ * Protected by 'lock'.
+ */
+ struct hlist_head *slot_hashtable;
+ unsigned int log_slot_ht_size;
+
+ /* Per-keyslot data */
+ struct blk_crypto_keyslot *slots;
+};
+
+int blk_crypto_profile_init(struct blk_crypto_profile *profile,
+ unsigned int num_slots);
+
+int devm_blk_crypto_profile_init(struct device *dev,
+ struct blk_crypto_profile *profile,
+ unsigned int num_slots);
+
+unsigned int blk_crypto_keyslot_index(struct blk_crypto_keyslot *slot);
+
+blk_status_t blk_crypto_get_keyslot(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key,
+ struct blk_crypto_keyslot **slot_ptr);
+
+void blk_crypto_put_keyslot(struct blk_crypto_keyslot *slot);
+
+bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile,
+ const struct blk_crypto_config *cfg);
+
+int __blk_crypto_evict_key(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key);
+
+void blk_crypto_reprogram_all_keys(struct blk_crypto_profile *profile);
+
+void blk_crypto_profile_destroy(struct blk_crypto_profile *profile);
+
+void blk_crypto_intersect_capabilities(struct blk_crypto_profile *parent,
+ const struct blk_crypto_profile *child);
+
+bool blk_crypto_has_capabilities(const struct blk_crypto_profile *target,
+ const struct blk_crypto_profile *reference);
+
+void blk_crypto_update_capabilities(struct blk_crypto_profile *dst,
+ const struct blk_crypto_profile *src);
+
+#endif /* __LINUX_BLK_CRYPTO_PROFILE_H */
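
A minimal sketch of how a driver with hardware keyslots might wire up this profile; the register accesses are imagined, and only the profile API and struct fields come from the header above:

#include <linux/blk-crypto-profile.h>

/* Hypothetical driver glue for made-up inline encryption hardware. */
static int example_keyslot_program(struct blk_crypto_profile *profile,
				   const struct blk_crypto_key *key,
				   unsigned int slot)
{
	/* ... write key->raw into the keyslot registers for 'slot' ... */
	return 0;
}

static int example_keyslot_evict(struct blk_crypto_profile *profile,
				 const struct blk_crypto_key *key,
				 unsigned int slot)
{
	/* ... clear the keyslot registers for 'slot' ... */
	return 0;
}

static int example_setup(struct device *dev, struct blk_crypto_profile *profile)
{
	int err = devm_blk_crypto_profile_init(dev, profile, 32 /* keyslots */);

	if (err)
		return err;
	profile->ll_ops.keyslot_program = example_keyslot_program;
	profile->ll_ops.keyslot_evict = example_keyslot_evict;
	profile->max_dun_bytes_supported = 8;
	/* bit 12 set: AES-256-XTS with a 4096-byte data unit size */
	profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] |= 1 << 12;
	return 0;
}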
diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h
new file mode 100644
index 000000000000..8a038ea0717e
--- /dev/null
+++ b/include/linux/blk-integrity.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BLK_INTEGRITY_H
+#define _LINUX_BLK_INTEGRITY_H
+
+#include <linux/blk-mq.h>
+
+struct request;
+
+enum blk_integrity_flags {
+ BLK_INTEGRITY_VERIFY = 1 << 0,
+ BLK_INTEGRITY_GENERATE = 1 << 1,
+ BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
+ BLK_INTEGRITY_IP_CHECKSUM = 1 << 3,
+};
+
+struct blk_integrity_iter {
+ void *prot_buf;
+ void *data_buf;
+ sector_t seed;
+ unsigned int data_size;
+ unsigned short interval;
+ const char *disk_name;
+};
+
+typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
+typedef void (integrity_prepare_fn) (struct request *);
+typedef void (integrity_complete_fn) (struct request *, unsigned int);
+
+struct blk_integrity_profile {
+ integrity_processing_fn *generate_fn;
+ integrity_processing_fn *verify_fn;
+ integrity_prepare_fn *prepare_fn;
+ integrity_complete_fn *complete_fn;
+ const char *name;
+};
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+void blk_integrity_register(struct gendisk *, struct blk_integrity *);
+void blk_integrity_unregister(struct gendisk *);
+int blk_integrity_compare(struct gendisk *, struct gendisk *);
+int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
+ struct scatterlist *);
+int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
+
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+{
+ struct blk_integrity *bi = &disk->queue->integrity;
+
+ if (!bi->profile)
+ return NULL;
+
+ return bi;
+}
+
+static inline struct blk_integrity *
+bdev_get_integrity(struct block_device *bdev)
+{
+ return blk_get_integrity(bdev->bd_disk);
+}
+
+static inline bool
+blk_integrity_queue_supports_integrity(struct request_queue *q)
+{
+ return q->integrity.profile;
+}
+
+static inline void blk_queue_max_integrity_segments(struct request_queue *q,
+ unsigned int segs)
+{
+ q->limits.max_integrity_segments = segs;
+}
+
+static inline unsigned short
+queue_max_integrity_segments(const struct request_queue *q)
+{
+ return q->limits.max_integrity_segments;
+}
+
+/**
+ * bio_integrity_intervals - Return number of integrity intervals for a bio
+ * @bi: blk_integrity profile for device
+ * @sectors: Size of the bio in 512-byte sectors
+ *
+ * Description: The block layer calculates everything in 512-byte
+ * sectors but integrity metadata is done in terms of the data integrity
+ * interval size of the storage device. Convert the block layer sectors
+ * to the appropriate number of integrity intervals.
+ */
+static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return sectors >> (bi->interval_exp - 9);
+}
+
+static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
+}
+
+static inline bool blk_integrity_rq(struct request *rq)
+{
+ return rq->cmd_flags & REQ_INTEGRITY;
+}
+
+/*
+ * Return the first bvec that contains integrity data. Only drivers that are
+ * limited to a single integrity segment should use this helper.
+ */
+static inline struct bio_vec *rq_integrity_vec(struct request *rq)
+{
+ if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1))
+ return NULL;
+ return rq->bio->bi_integrity->bip_vec;
+}
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+static inline int blk_rq_count_integrity_sg(struct request_queue *q,
+ struct bio *b)
+{
+ return 0;
+}
+static inline int blk_rq_map_integrity_sg(struct request_queue *q,
+ struct bio *b,
+ struct scatterlist *s)
+{
+ return 0;
+}
+static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
+{
+ return NULL;
+}
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+{
+ return NULL;
+}
+static inline bool
+blk_integrity_queue_supports_integrity(struct request_queue *q)
+{
+ return false;
+}
+static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
+{
+ return 0;
+}
+static inline void blk_integrity_register(struct gendisk *d,
+ struct blk_integrity *b)
+{
+}
+static inline void blk_integrity_unregister(struct gendisk *d)
+{
+}
+static inline void blk_queue_max_integrity_segments(struct request_queue *q,
+ unsigned int segs)
+{
+}
+static inline unsigned short
+queue_max_integrity_segments(const struct request_queue *q)
+{
+ return 0;
+}
+
+static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return 0;
+}
+
+static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return 0;
+}
+static inline int blk_integrity_rq(struct request *rq)
+{
+ return 0;
+}
+
+static inline struct bio_vec *rq_integrity_vec(struct request *rq)
+{
+ return NULL;
+}
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+#endif /* _LINUX_BLK_INTEGRITY_H */
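
To make the interval math concrete, a worked example under stated assumptions (an interval_exp of 12, i.e. 4096-byte intervals, and an 8-byte tuple):

/* Worked example of the interval math above; values are illustrative. */
static unsigned int example_metadata_bytes(struct blk_integrity *bi)
{
	/* with bi->interval_exp == 12 and bi->tuple_size == 8, a 32-sector
	 * (16 KiB) bio yields 32 >> (12 - 9) = 4 intervals, and
	 * 4 * 8 = 32 bytes of integrity metadata */
	return bio_integrity_bytes(bi, 32);
}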
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 13ba1861e688..2949d9ac7484 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -6,10 +6,226 @@
#include <linux/sbitmap.h>
#include <linux/srcu.h>
#include <linux/lockdep.h>
+#include <linux/scatterlist.h>
+#include <linux/prefetch.h>
struct blk_mq_tags;
struct blk_flush_queue;
+#define BLKDEV_MIN_RQ 4
+#define BLKDEV_DEFAULT_RQ 128
+
+typedef void (rq_end_io_fn)(struct request *, blk_status_t);
+
+/* request flags */
+typedef __u32 __bitwise req_flags_t;
+
+/* drive already may have started this one */
+#define RQF_STARTED ((__force req_flags_t)(1 << 1))
+/* may not be passed by ioscheduler */
+#define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3))
+/* request for flush sequence */
+#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4))
+/* merge of different types, fail separately */
+#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5))
+/* track inflight for MQ */
+#define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6))
+/* don't call prep for this one */
+#define RQF_DONTPREP ((__force req_flags_t)(1 << 7))
+/* vaguely specified driver internal error. Ignored by the block layer */
+#define RQF_FAILED ((__force req_flags_t)(1 << 10))
+/* don't warn about errors */
+#define RQF_QUIET ((__force req_flags_t)(1 << 11))
+/* elevator private data attached */
+#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12))
+/* account into disk and partition IO statistics */
+#define RQF_IO_STAT ((__force req_flags_t)(1 << 13))
+/* runtime pm request */
+#define RQF_PM ((__force req_flags_t)(1 << 15))
+/* on IO scheduler merge hash */
+#define RQF_HASHED ((__force req_flags_t)(1 << 16))
+/* track IO completion time */
+#define RQF_STATS ((__force req_flags_t)(1 << 17))
+/*
+ * Look at ->special_vec for the actual data payload instead of the
+ * bio chain.
+ */
+#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
+/* The per-zone write lock is held for this request */
+#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19))
+/* already slept for hybrid poll */
+#define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 20))
+/* ->timeout has been called, don't expire again */
+#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21))
+/* queue has elevator attached */
+#define RQF_ELV ((__force req_flags_t)(1 << 22))
+
+/* flags that prevent us from merging requests: */
+#define RQF_NOMERGE_FLAGS \
+ (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
+
+enum mq_rq_state {
+ MQ_RQ_IDLE = 0,
+ MQ_RQ_IN_FLIGHT = 1,
+ MQ_RQ_COMPLETE = 2,
+};
+
+/*
+ * Try to put the fields that are referenced together in the same cacheline.
+ *
+ * If you modify this structure, make sure to update blk_rq_init() and
+ * especially blk_mq_rq_ctx_init() to take care of the added fields.
+ */
+struct request {
+ struct request_queue *q;
+ struct blk_mq_ctx *mq_ctx;
+ struct blk_mq_hw_ctx *mq_hctx;
+
+ unsigned int cmd_flags; /* op and common flags */
+ req_flags_t rq_flags;
+
+ int tag;
+ int internal_tag;
+
+ unsigned int timeout;
+
+ /* the following two fields are internal, NEVER access directly */
+ unsigned int __data_len; /* total data len */
+ sector_t __sector; /* sector cursor */
+
+ struct bio *bio;
+ struct bio *biotail;
+
+ union {
+ struct list_head queuelist;
+ struct request *rq_next;
+ };
+
+ struct gendisk *rq_disk;
+ struct block_device *part;
+#ifdef CONFIG_BLK_RQ_ALLOC_TIME
+ /* Time that the first bio started allocating this request. */
+ u64 alloc_time_ns;
+#endif
+ /* Time that this request was allocated for this IO. */
+ u64 start_time_ns;
+ /* Time that I/O was submitted to the device. */
+ u64 io_start_time_ns;
+
+#ifdef CONFIG_BLK_WBT
+ unsigned short wbt_flags;
+#endif
+ /*
+	 * rq sectors used for blk stats. It has the same value as
+	 * blk_rq_sectors(rq), except that it is never zeroed out by
+	 * completion.
+ */
+ unsigned short stats_sectors;
+
+ /*
+ * Number of scatter-gather DMA addr+len pairs after
+ * physical address coalescing is performed.
+ */
+ unsigned short nr_phys_segments;
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ unsigned short nr_integrity_segments;
+#endif
+
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+ struct bio_crypt_ctx *crypt_ctx;
+ struct blk_crypto_keyslot *crypt_keyslot;
+#endif
+
+ unsigned short write_hint;
+ unsigned short ioprio;
+
+ enum mq_rq_state state;
+ refcount_t ref;
+
+ unsigned long deadline;
+
+ /*
+ * The hash is used inside the scheduler, and killed once the
+ * request reaches the dispatch list. The ipi_list is only used
+ * to queue the request for softirq completion, which is long
+ * after the request has been unhashed (and even removed from
+ * the dispatch list).
+ */
+ union {
+ struct hlist_node hash; /* merge hash */
+ struct llist_node ipi_list;
+ };
+
+ /*
+ * The rb_node is only used inside the io scheduler, requests
+ * are pruned when moved to the dispatch queue. So let the
+ * completion_data share space with the rb_node.
+ */
+ union {
+ struct rb_node rb_node; /* sort/lookup */
+ struct bio_vec special_vec;
+ void *completion_data;
+ int error_count; /* for legacy drivers, don't use */
+ };
+
+ /*
+ * Three pointers are available for the IO schedulers, if they need
+ * more they have to dynamically allocate it. Flush requests are
+ * never put on the IO scheduler. So let the flush fields share
+ * space with the elevator data.
+ */
+ union {
+ struct {
+ struct io_cq *icq;
+ void *priv[2];
+ } elv;
+
+ struct {
+ unsigned int seq;
+ struct list_head list;
+ rq_end_io_fn *saved_end_io;
+ } flush;
+ };
+
+ union {
+ struct __call_single_data csd;
+ u64 fifo_time;
+ };
+
+ /*
+ * completion callback.
+ */
+ rq_end_io_fn *end_io;
+ void *end_io_data;
+};
+
+#define req_op(req) \
+ ((req)->cmd_flags & REQ_OP_MASK)
+
+static inline bool blk_rq_is_passthrough(struct request *rq)
+{
+ return blk_op_is_passthrough(req_op(rq));
+}
+
+static inline unsigned short req_get_ioprio(struct request *req)
+{
+ return req->ioprio;
+}
+
+#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
+
+#define rq_dma_dir(rq) \
+ (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
+enum blk_eh_timer_return {
+	BLK_EH_DONE,		/* driver has completed the command */
+ BLK_EH_RESET_TIMER, /* reset timer and try again */
+};
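
An illustrative ->timeout handler choosing between the two return values (the pdu layout and retry policy are hypothetical):

struct example_cmd {
	int retries;
};

/* Hypothetical timeout handler: re-arm once, then complete the request. */
static enum blk_eh_timer_return example_timeout(struct request *req, bool reserved)
{
	struct example_cmd *cmd = blk_mq_rq_to_pdu(req);

	if (cmd->retries++ < 1)
		return BLK_EH_RESET_TIMER;	/* try the command again */
	blk_mq_complete_request(req);		/* driver finishes the request */
	return BLK_EH_DONE;
}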
+
+#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
+#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
+
/**
* struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
* block device
@@ -126,9 +342,6 @@ struct blk_mq_hw_ctx {
unsigned long queued;
/** @run: Number of dispatched requests. */
unsigned long run;
-#define BLK_MQ_MAX_DISPATCH_ORDER 7
- /** @dispatched: Number of dispatch requests by queue. */
- unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
/** @numa_node: NUMA node the storage adapter has been connected to. */
unsigned int numa_node;
@@ -148,13 +361,6 @@ struct blk_mq_hw_ctx {
/** @kobj: Kernel object for sysfs. */
struct kobject kobj;
- /** @poll_considered: Count times blk_poll() was called. */
- unsigned long poll_considered;
- /** @poll_invoked: Count how many requests blk_poll() polled. */
- unsigned long poll_invoked;
- /** @poll_success: Count how many polled requests were completed. */
- unsigned long poll_success;
-
#ifdef CONFIG_BLK_DEBUG_FS
/**
* @debugfs_dir: debugfs directory for this hardware queue. Named
@@ -232,13 +438,11 @@ enum hctx_type {
* @flags: Zero or more BLK_MQ_F_* flags.
* @driver_data: Pointer to data owned by the block driver that created this
* tag set.
- * @active_queues_shared_sbitmap:
- * number of active request queues per tag set.
- * @__bitmap_tags: A shared tags sbitmap, used over all hctx's
- * @__breserved_tags:
- * A shared reserved tags sbitmap, used over all hctx's
* @tags: Tag sets. One tag set per hardware queue. Has @nr_hw_queues
* elements.
+ * @shared_tags:
+ * Shared set of tags. Has @nr_hw_queues elements. If set,
+ * shared by all @tags.
* @tag_list_lock: Serializes tag_list accesses.
* @tag_list: List of the request queues that use this tag set. See also
* request_queue.tag_set_list.
@@ -255,12 +459,11 @@ struct blk_mq_tag_set {
unsigned int timeout;
unsigned int flags;
void *driver_data;
- atomic_t active_queues_shared_sbitmap;
- struct sbitmap_queue __bitmap_tags;
- struct sbitmap_queue __breserved_tags;
struct blk_mq_tags **tags;
+ struct blk_mq_tags *shared_tags;
+
struct mutex tag_list_lock;
struct list_head tag_list;
};
@@ -330,7 +533,7 @@ struct blk_mq_ops {
/**
* @poll: Called to poll for completion of a specific tag.
*/
- int (*poll)(struct blk_mq_hw_ctx *);
+ int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *);
/**
* @complete: Mark the request as complete.
@@ -364,11 +567,6 @@ struct blk_mq_ops {
unsigned int);
/**
- * @initialize_rq_fn: Called from inside blk_get_request().
- */
- void (*initialize_rq_fn)(struct request *rq);
-
- /**
* @cleanup_rq: Called before freeing one request which isn't completed
* yet, and usually for freeing the driver private data.
*/
@@ -432,6 +630,8 @@ enum {
((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
+#define BLK_MQ_NO_HCTX_IDX (-1U)
+
struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
struct lock_class_key *lkclass);
#define blk_mq_alloc_disk(set, queuedata) \
@@ -451,8 +651,6 @@ int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
unsigned int set_flags);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
-void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
-
void blk_mq_free_request(struct request *rq);
bool blk_mq_queue_inflight(struct request_queue *q);
@@ -471,7 +669,40 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
unsigned int op, blk_mq_req_flags_t flags,
unsigned int hctx_idx);
-struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
+
+/*
+ * Tag address space map.
+ */
+struct blk_mq_tags {
+ unsigned int nr_tags;
+ unsigned int nr_reserved_tags;
+
+ atomic_t active_queues;
+
+ struct sbitmap_queue bitmap_tags;
+ struct sbitmap_queue breserved_tags;
+
+ struct request **rqs;
+ struct request **static_rqs;
+ struct list_head page_list;
+
+ /*
+	 * used to clear request references in rqs[] before freeing a
+	 * request pool
+ */
+ spinlock_t lock;
+};
+
+static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags,
+ unsigned int tag)
+{
+ if (tag < tags->nr_tags) {
+ prefetch(tags->rqs[tag]);
+ return tags->rqs[tag];
+ }
+
+ return NULL;
+}
enum {
BLK_MQ_UNIQUE_TAG_BITS = 16,
@@ -524,6 +755,35 @@ static inline void blk_mq_set_request_complete(struct request *rq)
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
+void blk_mq_end_request_batch(struct io_comp_batch *ib);
+
+/*
+ * Only need start/end time stamping if we have iostat or
+ * blk stats enabled, or are using an IO scheduler.
+ */
+static inline bool blk_mq_need_time_stamp(struct request *rq)
+{
+ return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_ELV));
+}
+
+/*
+ * Batched completions only work when there is no I/O error and no special
+ * ->end_io handler.
+ */
+static inline bool blk_mq_add_to_batch(struct request *req,
+ struct io_comp_batch *iob, int ioerror,
+ void (*complete)(struct io_comp_batch *))
+{
+ if (!iob || (req->rq_flags & RQF_ELV) || req->end_io || ioerror)
+ return false;
+ if (!iob->complete)
+ iob->complete = complete;
+ else if (iob->complete != complete)
+ return false;
+ iob->need_ts |= blk_mq_need_time_stamp(req);
+ rq_list_add(&iob->req_list, req);
+ return true;
+}
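
A hedged sketch of the consumer side: a driver's completion path tries to batch each request and falls back to blk_mq_end_request() when batching is refused; the caller later drains the batch with blk_mq_end_request_batch():

/* Hypothetical completion path: batch when possible, else complete singly. */
static void example_complete(struct request *req, struct io_comp_batch *iob,
			     int ioerror, void (*done)(struct io_comp_batch *))
{
	if (!blk_mq_add_to_batch(req, iob, ioerror, done))
		blk_mq_end_request(req, errno_to_blk_status(ioerror));
}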
void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
@@ -538,6 +798,7 @@ void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
+void blk_mq_wait_quiesce_done(struct request_queue *q);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
@@ -605,16 +866,6 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
for ((i) = 0; (i) < (hctx)->nr_ctx && \
({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
-static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,
- struct request *rq)
-{
- if (rq->tag != -1)
- return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT);
-
- return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) |
- BLK_QC_T_INTERNAL;
-}
-
static inline void blk_mq_cleanup_rq(struct request *rq)
{
if (rq->q->mq_ops->cleanup_rq)
@@ -633,8 +884,265 @@ static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
rq->rq_disk = bio->bi_bdev->bd_disk;
}
-blk_qc_t blk_mq_submit_bio(struct bio *bio);
void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
struct lock_class_key *key);
+static inline bool rq_is_sync(struct request *rq)
+{
+ return op_is_sync(rq->cmd_flags);
+}
+
+void blk_rq_init(struct request_queue *q, struct request *rq);
+int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
+ struct bio_set *bs, gfp_t gfp_mask,
+ int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
+void blk_rq_unprep_clone(struct request *rq);
+blk_status_t blk_insert_cloned_request(struct request_queue *q,
+ struct request *rq);
+
+struct rq_map_data {
+ struct page **pages;
+ int page_order;
+ int nr_entries;
+ unsigned long offset;
+ int null_mapped;
+ int from_user;
+};
+
+int blk_rq_map_user(struct request_queue *, struct request *,
+ struct rq_map_data *, void __user *, unsigned long, gfp_t);
+int blk_rq_map_user_iov(struct request_queue *, struct request *,
+ struct rq_map_data *, const struct iov_iter *, gfp_t);
+int blk_rq_unmap_user(struct bio *);
+int blk_rq_map_kern(struct request_queue *, struct request *, void *,
+ unsigned int, gfp_t);
+int blk_rq_append_bio(struct request *rq, struct bio *bio);
+void blk_execute_rq_nowait(struct gendisk *, struct request *, int,
+ rq_end_io_fn *);
+blk_status_t blk_execute_rq(struct gendisk *bd_disk, struct request *rq,
+ int at_head);
+
+struct req_iterator {
+ struct bvec_iter iter;
+ struct bio *bio;
+};
+
+#define __rq_for_each_bio(_bio, rq) \
+ if ((rq->bio)) \
+ for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
+
+#define rq_for_each_segment(bvl, _rq, _iter) \
+ __rq_for_each_bio(_iter.bio, _rq) \
+ bio_for_each_segment(bvl, _iter.bio, _iter.iter)
+
+#define rq_for_each_bvec(bvl, _rq, _iter) \
+ __rq_for_each_bio(_iter.bio, _rq) \
+ bio_for_each_bvec(bvl, _iter.bio, _iter.iter)
+
+#define rq_iter_last(bvec, _iter) \
+ (_iter.bio->bi_next == NULL && \
+ bio_iter_last(bvec, _iter.iter))
+
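
For illustration (not part of this patch), walking every data segment of a request with these iterators; bv_len is the byte length of each contiguous chunk:

	/* Illustrative: sum the payload bytes of a request segment by segment. */
	static unsigned int my_rq_payload(struct request *rq)
	{
		struct req_iterator iter;
		struct bio_vec bvec;
		unsigned int bytes = 0;

		rq_for_each_segment(bvec, rq, iter)
			bytes += bvec.bv_len;

		return bytes;
	}
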
+/*
+ * blk_rq_pos() : the current sector
+ * blk_rq_bytes() : bytes left in the entire request
+ * blk_rq_cur_bytes() : bytes left in the current segment
+ * blk_rq_err_bytes() : bytes left till the next error boundary
+ * blk_rq_sectors() : sectors left in the entire request
+ * blk_rq_cur_sectors() : sectors left in the current segment
+ * blk_rq_stats_sectors() : sectors of the entire request used for stats
+ */
+static inline sector_t blk_rq_pos(const struct request *rq)
+{
+ return rq->__sector;
+}
+
+static inline unsigned int blk_rq_bytes(const struct request *rq)
+{
+ return rq->__data_len;
+}
+
+static inline int blk_rq_cur_bytes(const struct request *rq)
+{
+ if (!rq->bio)
+ return 0;
+ if (!bio_has_data(rq->bio)) /* dataless requests such as discard */
+ return rq->bio->bi_iter.bi_size;
+ return bio_iovec(rq->bio).bv_len;
+}
+
+unsigned int blk_rq_err_bytes(const struct request *rq);
+
+static inline unsigned int blk_rq_sectors(const struct request *rq)
+{
+ return blk_rq_bytes(rq) >> SECTOR_SHIFT;
+}
+
+static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
+{
+ return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
+}
+
+static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
+{
+ return rq->stats_sectors;
+}
+
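
To make the arithmetic concrete: a request with __sector = 2048 and __data_len = 8192 yields blk_rq_pos() = 2048, blk_rq_bytes() = 8192 and blk_rq_sectors() = 8192 >> 9 = 16. A minimal bounds check built on these accessors (the capacity parameter is invented for illustration):

	/* Illustrative: does the request fit inside the device capacity? */
	static bool my_rq_in_bounds(struct request *rq, sector_t capacity)
	{
		return blk_rq_pos(rq) + blk_rq_sectors(rq) <= capacity;
	}
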
+/*
+ * Some commands like WRITE SAME have a payload or data transfer size which
+ * is different from the size of the request. Any driver that supports such
+ * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
+ * calculate the data transfer size.
+ */
+static inline unsigned int blk_rq_payload_bytes(struct request *rq)
+{
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+ return rq->special_vec.bv_len;
+ return blk_rq_bytes(rq);
+}
+
+/*
+ * Return the first full biovec in the request. The caller must ensure that
+ * the request contains at least one bvec before calling this helper.
+ */
+static inline struct bio_vec req_bvec(struct request *rq)
+{
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+ return rq->special_vec;
+ return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
+}
+
+static inline unsigned int blk_rq_count_bios(struct request *rq)
+{
+ unsigned int nr_bios = 0;
+ struct bio *bio;
+
+ __rq_for_each_bio(bio, rq)
+ nr_bios++;
+
+ return nr_bios;
+}
+
+void blk_steal_bios(struct bio_list *list, struct request *rq);
+
+/*
+ * Request completion related functions.
+ *
+ * blk_update_request() completes the given number of bytes and updates
+ * the request without completing it.
+ */
+bool blk_update_request(struct request *rq, blk_status_t error,
+ unsigned int nr_bytes);
+void blk_abort_request(struct request *);
+
+/*
+ * Number of physical segments as sent to the device.
+ *
+ * Normally this is the number of discontiguous data segments sent by the
+ * submitter. But for data-less commands like discard we might have no
+ * actual data segments submitted, but the driver might have to add its
+ * own special payload. In that case we still return 1 here so that this
+ * special payload will be mapped.
+ */
+static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
+{
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+ return 1;
+ return rq->nr_phys_segments;
+}
+
+/*
+ * Number of discard segments (or ranges) the driver needs to fill in.
+ * Each discard bio merged into a request is counted as one segment.
+ */
+static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
+{
+ return max_t(unsigned short, rq->nr_phys_segments, 1);
+}
+
+int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
+ struct scatterlist *sglist, struct scatterlist **last_sg);
+static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
+ struct scatterlist *sglist)
+{
+ struct scatterlist *last_sg = NULL;
+
+ return __blk_rq_map_sg(q, rq, sglist, &last_sg);
+}
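
A minimal sketch of the usual ->queue_rq() pattern around this helper: map the request onto a driver-owned scatterlist before DMA-mapping it. MY_MAX_SEGS and struct my_cmd are assumptions.

	#include <linux/scatterlist.h>

	/* Illustrative only. */
	static blk_status_t my_map_data(struct request *rq, struct my_cmd *cmd)
	{
		int nents;

		sg_init_table(cmd->sgl, MY_MAX_SEGS);
		nents = blk_rq_map_sg(rq->q, rq, cmd->sgl);
		if (!nents)
			return BLK_STS_IOERR;	/* illustrative: we expected data */
		cmd->nents = nents;		/* hand to dma_map_sg() next */
		return BLK_STS_OK;
	}
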
+void blk_dump_rq_flags(struct request *, char *);
+
+#ifdef CONFIG_BLK_DEV_ZONED
+static inline unsigned int blk_rq_zone_no(struct request *rq)
+{
+ return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
+}
+
+static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
+{
+ return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
+}
+
+bool blk_req_needs_zone_write_lock(struct request *rq);
+bool blk_req_zone_write_trylock(struct request *rq);
+void __blk_req_zone_write_lock(struct request *rq);
+void __blk_req_zone_write_unlock(struct request *rq);
+
+static inline void blk_req_zone_write_lock(struct request *rq)
+{
+ if (blk_req_needs_zone_write_lock(rq))
+ __blk_req_zone_write_lock(rq);
+}
+
+static inline void blk_req_zone_write_unlock(struct request *rq)
+{
+ if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
+ __blk_req_zone_write_unlock(rq);
+}
+
+static inline bool blk_req_zone_is_write_locked(struct request *rq)
+{
+ return rq->q->seq_zones_wlock &&
+ test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock);
+}
+
+static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
+{
+ if (!blk_req_needs_zone_write_lock(rq))
+ return true;
+ return !blk_req_zone_is_write_locked(rq);
+}
+#else /* CONFIG_BLK_DEV_ZONED */
+static inline bool blk_req_needs_zone_write_lock(struct request *rq)
+{
+ return false;
+}
+
+static inline void blk_req_zone_write_lock(struct request *rq)
+{
+}
+
+static inline void blk_req_zone_write_unlock(struct request *rq)
+{
+}
+static inline bool blk_req_zone_is_write_locked(struct request *rq)
+{
+ return false;
+}
+
+static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
+{
+ return true;
+}
+#endif /* CONFIG_BLK_DEV_ZONED */
+
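
As a hedged sketch of how an I/O scheduler might gate dispatch with these helpers (roughly the mq-deadline pattern; my_defer() is invented): requests targeting a write-locked zone are deferred, otherwise the zone write lock is taken before the request is handed to the driver.

	/* Illustrative only. */
	static bool my_try_dispatch(struct request *rq)
	{
		if (!blk_req_can_dispatch_to_zone(rq)) {
			my_defer(rq);		/* target zone is write-locked */
			return false;
		}
		blk_req_zone_write_lock(rq);	/* no-op unless the zone needs it */
		return true;
	}
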
+#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+void rq_flush_dcache_pages(struct request *rq);
+#else
+static inline void rq_flush_dcache_pages(struct request *rq)
+{
+}
+#endif /* ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE */
+#endif /* BLK_MQ_H */
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index be622b5a21ed..fe065c394fff 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -20,8 +20,26 @@ struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;
+/*
+ * The basic unit of block I/O is a sector. It is used in a number of contexts
+ * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
+ * bytes. Variables of type sector_t represent an offset or size that is a
+ * multiple of 512 bytes. Hence these two constants.
+ */
+#ifndef SECTOR_SHIFT
+#define SECTOR_SHIFT 9
+#endif
+#ifndef SECTOR_SIZE
+#define SECTOR_SIZE (1 << SECTOR_SHIFT)
+#endif
+
+#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
+#define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT)
+#define SECTOR_MASK (PAGE_SECTORS - 1)
+
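
Worked example: on a 4 KiB page kernel, PAGE_SECTORS_SHIFT is 12 - 9 = 3, so PAGE_SECTORS is 8 and SECTOR_MASK is 0x7. Two conversion helpers built on these constants, for illustration only:

	/* Illustrative conversions between bytes, pages and 512-byte sectors. */
	static inline sector_t my_bytes_to_sectors(u64 bytes)
	{
		return bytes >> SECTOR_SHIFT;		/* 4096 bytes -> 8 sectors */
	}

	static inline sector_t my_page_to_sector(pgoff_t idx)
	{
		return (sector_t)idx << PAGE_SECTORS_SHIFT;
	}
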
struct block_device {
sector_t bd_start_sect;
+ sector_t bd_nr_sectors;
struct disk_stats __percpu *bd_stats;
unsigned long bd_stamp;
bool bd_read_only; /* read-only policy */
@@ -38,6 +56,7 @@ struct block_device {
u8 bd_partno;
spinlock_t bd_size_lock; /* for bd_inode->i_size updates */
struct gendisk * bd_disk;
+ struct request_queue * bd_queue;
/* The counter of freeze processes */
int bd_fsfreeze_count;
@@ -208,6 +227,9 @@ static inline void bio_issue_init(struct bio_issue *issue,
((u64)size << BIO_ISSUE_SIZE_SHIFT));
}
+typedef unsigned int blk_qc_t;
+#define BLK_QC_T_NONE -1U
+
/*
* main unit of I/O for the block layer and lower layers (ie drivers and
* stacking drivers)
@@ -227,8 +249,8 @@ struct bio {
struct bvec_iter bi_iter;
+ blk_qc_t bi_cookie;
bio_end_io_t *bi_end_io;
-
void *bi_private;
#ifdef CONFIG_BLK_CGROUP
/*
@@ -384,7 +406,7 @@ enum req_flag_bits {
/* command specific flags for REQ_OP_WRITE_ZEROES: */
__REQ_NOUNMAP, /* do not free blocks when zeroing */
- __REQ_HIPRI,
+ __REQ_POLLED, /* caller polls for completion using bio_poll */
/* for driver use */
__REQ_DRV,
@@ -409,7 +431,7 @@ enum req_flag_bits {
#define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT)
#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
-#define REQ_HIPRI (1ULL << __REQ_HIPRI)
+#define REQ_POLLED (1ULL << __REQ_POLLED)
#define REQ_DRV (1ULL << __REQ_DRV)
#define REQ_SWAP (1ULL << __REQ_SWAP)
@@ -431,8 +453,6 @@ enum stat_group {
#define bio_op(bio) \
((bio)->bi_opf & REQ_OP_MASK)
-#define req_op(req) \
- ((req)->cmd_flags & REQ_OP_MASK)
/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
@@ -497,31 +517,6 @@ static inline int op_stat_group(unsigned int op)
return op_is_write(op);
}
-typedef unsigned int blk_qc_t;
-#define BLK_QC_T_NONE -1U
-#define BLK_QC_T_SHIFT 16
-#define BLK_QC_T_INTERNAL (1U << 31)
-
-static inline bool blk_qc_t_valid(blk_qc_t cookie)
-{
- return cookie != BLK_QC_T_NONE;
-}
-
-static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
-{
- return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
-}
-
-static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
-{
- return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
-}
-
-static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
-{
- return (cookie & BLK_QC_T_INTERNAL) != 0;
-}
-
struct blk_rq_stat {
u64 mean;
u64 min;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 12b9dbcc980e..bd4370baccca 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -3,8 +3,6 @@
#define _LINUX_BLKDEV_H
#include <linux/sched.h>
-#include <linux/sched/clock.h>
-#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
@@ -12,17 +10,11 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
-#include <linux/mempool.h>
-#include <linux/pfn.h>
#include <linux/bio.h>
-#include <linux/stringify.h>
#include <linux/gfp.h>
-#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
-#include <linux/scatterlist.h>
#include <linux/blkzoned.h>
-#include <linux/pm.h>
#include <linux/sbitmap.h>
struct module;
@@ -33,14 +25,12 @@ struct request;
struct sg_io_hdr;
struct blkcg_gq;
struct blk_flush_queue;
+struct kiocb;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
-struct blk_keyslot_manager;
-
-#define BLKDEV_MIN_RQ 4
-#define BLKDEV_MAX_RQ 128 /* Default maximum */
+struct blk_crypto_profile;
/* Must be consistent with blk_mq_poll_stats_bkt() */
#define BLK_MQ_POLL_STATS_BKTS 16
@@ -54,186 +44,13 @@ struct blk_keyslot_manager;
*/
#define BLKCG_MAX_POLS 6
-typedef void (rq_end_io_fn)(struct request *, blk_status_t);
-
-/*
- * request flags */
-typedef __u32 __bitwise req_flags_t;
-
-/* drive already may have started this one */
-#define RQF_STARTED ((__force req_flags_t)(1 << 1))
-/* may not be passed by ioscheduler */
-#define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3))
-/* request for flush sequence */
-#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4))
-/* merge of different types, fail separately */
-#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5))
-/* track inflight for MQ */
-#define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6))
-/* don't call prep for this one */
-#define RQF_DONTPREP ((__force req_flags_t)(1 << 7))
-/* vaguely specified driver internal error. Ignored by the block layer */
-#define RQF_FAILED ((__force req_flags_t)(1 << 10))
-/* don't warn about errors */
-#define RQF_QUIET ((__force req_flags_t)(1 << 11))
-/* elevator private data attached */
-#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12))
-/* account into disk and partition IO statistics */
-#define RQF_IO_STAT ((__force req_flags_t)(1 << 13))
-/* runtime pm request */
-#define RQF_PM ((__force req_flags_t)(1 << 15))
-/* on IO scheduler merge hash */
-#define RQF_HASHED ((__force req_flags_t)(1 << 16))
-/* track IO completion time */
-#define RQF_STATS ((__force req_flags_t)(1 << 17))
-/* Look at ->special_vec for the actual data payload instead of the
- bio chain. */
-#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
-/* The per-zone write lock is held for this request */
-#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19))
-/* already slept for hybrid poll */
-#define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 20))
-/* ->timeout has been called, don't expire again */
-#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21))
-
-/* flags that prevent us from merging requests: */
-#define RQF_NOMERGE_FLAGS \
- (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
-
-/*
- * Request state for blk-mq.
- */
-enum mq_rq_state {
- MQ_RQ_IDLE = 0,
- MQ_RQ_IN_FLIGHT = 1,
- MQ_RQ_COMPLETE = 2,
-};
-
-/*
- * Try to put the fields that are referenced together in the same cacheline.
- *
- * If you modify this structure, make sure to update blk_rq_init() and
- * especially blk_mq_rq_ctx_init() to take care of the added fields.
- */
-struct request {
- struct request_queue *q;
- struct blk_mq_ctx *mq_ctx;
- struct blk_mq_hw_ctx *mq_hctx;
-
- unsigned int cmd_flags; /* op and common flags */
- req_flags_t rq_flags;
-
- int tag;
- int internal_tag;
-
- /* the following two fields are internal, NEVER access directly */
- unsigned int __data_len; /* total data len */
- sector_t __sector; /* sector cursor */
-
- struct bio *bio;
- struct bio *biotail;
-
- struct list_head queuelist;
-
- /*
- * The hash is used inside the scheduler, and killed once the
- * request reaches the dispatch list. The ipi_list is only used
- * to queue the request for softirq completion, which is long
- * after the request has been unhashed (and even removed from
- * the dispatch list).
- */
- union {
- struct hlist_node hash; /* merge hash */
- struct llist_node ipi_list;
- };
-
- /*
- * The rb_node is only used inside the io scheduler, requests
- * are pruned when moved to the dispatch queue. So let the
- * completion_data share space with the rb_node.
- */
- union {
- struct rb_node rb_node; /* sort/lookup */
- struct bio_vec special_vec;
- void *completion_data;
- int error_count; /* for legacy drivers, don't use */
- };
-
- /*
- * Three pointers are available for the IO schedulers, if they need
- * more they have to dynamically allocate it. Flush requests are
- * never put on the IO scheduler. So let the flush fields share
- * space with the elevator data.
- */
- union {
- struct {
- struct io_cq *icq;
- void *priv[2];
- } elv;
-
- struct {
- unsigned int seq;
- struct list_head list;
- rq_end_io_fn *saved_end_io;
- } flush;
- };
-
- struct gendisk *rq_disk;
- struct block_device *part;
-#ifdef CONFIG_BLK_RQ_ALLOC_TIME
- /* Time that the first bio started allocating this request. */
- u64 alloc_time_ns;
-#endif
- /* Time that this request was allocated for this IO. */
- u64 start_time_ns;
- /* Time that I/O was submitted to the device. */
- u64 io_start_time_ns;
-
-#ifdef CONFIG_BLK_WBT
- unsigned short wbt_flags;
-#endif
- /*
- * rq sectors used for blk stats. It has the same value
- * with blk_rq_sectors(rq), except that it never be zeroed
- * by completion.
- */
- unsigned short stats_sectors;
-
- /*
- * Number of scatter-gather DMA addr+len pairs after
- * physical address coalescing is performed.
- */
- unsigned short nr_phys_segments;
-
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
- unsigned short nr_integrity_segments;
-#endif
-
-#ifdef CONFIG_BLK_INLINE_ENCRYPTION
- struct bio_crypt_ctx *crypt_ctx;
- struct blk_ksm_keyslot *crypt_keyslot;
-#endif
-
- unsigned short write_hint;
- unsigned short ioprio;
-
- enum mq_rq_state state;
- refcount_t ref;
-
- unsigned int timeout;
- unsigned long deadline;
-
- union {
- struct __call_single_data csd;
- u64 fifo_time;
- };
+static inline int blk_validate_block_size(unsigned int bsize)
+{
+ if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
+ return -EINVAL;
- /*
- * completion callback.
- */
- rq_end_io_fn *end_io;
- void *end_io_data;
-};
+ return 0;
+}
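
For example (a sketch, not from this patch), a driver taking a user-configurable logical block size would reject 1000 (not a power of two) and, on 4 KiB page systems, 8192 (larger than PAGE_SIZE):

	/* Illustrative; struct my_dev is an assumption. */
	static int my_set_block_size(struct my_dev *dev, unsigned int bsize)
	{
		int err = blk_validate_block_size(bsize);

		if (err)
			return err;	/* -EINVAL */
		dev->block_size = bsize;
		return 0;
	}
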
static inline bool blk_op_is_passthrough(unsigned int op)
{
@@ -241,35 +58,6 @@ static inline bool blk_op_is_passthrough(unsigned int op)
return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}
-static inline bool blk_rq_is_passthrough(struct request *rq)
-{
- return blk_op_is_passthrough(req_op(rq));
-}
-
-static inline unsigned short req_get_ioprio(struct request *req)
-{
- return req->ioprio;
-}
-
-#include <linux/elevator.h>
-
-struct blk_queue_ctx;
-
-struct bio_vec;
-
-enum blk_eh_timer_return {
- BLK_EH_DONE, /* drivers has completed the command */
- BLK_EH_RESET_TIMER, /* reset timer and try again */
-};
-
-enum blk_queue_state {
- Queue_down,
- Queue_up,
-};
-
-#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
-#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
-
/*
* Zoned block device models (zoned limit).
*
@@ -370,6 +158,34 @@ static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
#endif /* CONFIG_BLK_DEV_ZONED */
+/*
+ * Independent access ranges: struct blk_independent_access_range describes
+ * a range of contiguous sectors that can be accessed using device command
+ * execution resources that are independent from the resources used for
+ * other access ranges. This is typically found with single-LUN multi-actuator
+ * HDDs where each access range is served by a different set of heads.
+ * The set of independent ranges supported by the device is defined using
+ * struct blk_independent_access_ranges. The independent ranges must not overlap
+ * and must include all sectors within the disk capacity (no sector holes
+ * allowed).
+ * For a device with multiple ranges, requests targeting sectors in different
+ * ranges can be executed in parallel. A request can straddle an access range
+ * boundary.
+ */
+struct blk_independent_access_range {
+ struct kobject kobj;
+ struct request_queue *queue;
+ sector_t sector;
+ sector_t nr_sectors;
+};
+
+struct blk_independent_access_ranges {
+ struct kobject kobj;
+ bool sysfs_registered;
+ unsigned int nr_ia_ranges;
+ struct blk_independent_access_range ia_range[];
+};
+
struct request_queue {
struct request *last_merge;
struct elevator_queue *elevator;
@@ -444,8 +260,7 @@ struct request_queue {
unsigned int dma_alignment;
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
- /* Inline crypto capabilities */
- struct blk_keyslot_manager *ksm;
+ struct blk_crypto_profile *crypto_profile;
#endif
unsigned int rq_timeout;
@@ -457,10 +272,9 @@ struct request_queue {
struct timer_list timeout;
struct work_struct timeout_work;
- atomic_t nr_active_requests_shared_sbitmap;
+ atomic_t nr_active_requests_shared_tags;
- struct sbitmap_queue sched_bitmap_tags;
- struct sbitmap_queue sched_breserved_tags;
+ struct blk_mq_tags *sched_shared_tags;
struct list_head icq_list;
#ifdef CONFIG_BLK_CGROUP
@@ -536,6 +350,8 @@ struct request_queue {
*/
struct mutex mq_freeze_lock;
+ int quiesce_depth;
+
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
struct bio_set bio_split;
@@ -549,10 +365,14 @@ struct request_queue {
bool mq_sysfs_init_done;
- size_t cmd_size;
-
#define BLK_MAX_WRITE_HINTS 5
u64 write_hints[BLK_MAX_WRITE_HINTS];
+
+ /*
+ * Independent sector access ranges. This is always NULL for
+ * devices that do not have multiple independent access ranges.
+ */
+ struct blk_independent_access_ranges *ia_ranges;
};
/* Keep blk_queue_flag_name[] in sync with the definitions below */
@@ -579,7 +399,6 @@ struct request_queue {
#define QUEUE_FLAG_STATS 20 /* track IO start and completion times */
#define QUEUE_FLAG_POLL_STATS 21 /* collecting stats for hybrid polling */
#define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */
-#define QUEUE_FLAG_SCSI_PASSTHROUGH 23 /* queue supports SCSI commands */
#define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */
#define QUEUE_FLAG_PCI_P2PDMA 25 /* device supports PCI p2p requests */
#define QUEUE_FLAG_ZONE_RESETALL 26 /* supports Zone Reset All */
@@ -613,8 +432,6 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
#define blk_queue_secure_erase(q) \
(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
-#define blk_queue_scsi_passthrough(q) \
- test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
#define blk_queue_pci_p2pdma(q) \
test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
@@ -638,11 +455,6 @@ extern void blk_clear_pm_only(struct request_queue *q);
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
-#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
-
-#define rq_dma_dir(rq) \
- (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
-
#define dma_map_bvec(dev, bv, dir, attrs) \
dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
(dir), (attrs))
@@ -758,42 +570,6 @@ static inline unsigned int queue_max_active_zones(const struct request_queue *q)
}
#endif /* CONFIG_BLK_DEV_ZONED */
-static inline bool rq_is_sync(struct request *rq)
-{
- return op_is_sync(rq->cmd_flags);
-}
-
-static inline bool rq_mergeable(struct request *rq)
-{
- if (blk_rq_is_passthrough(rq))
- return false;
-
- if (req_op(rq) == REQ_OP_FLUSH)
- return false;
-
- if (req_op(rq) == REQ_OP_WRITE_ZEROES)
- return false;
-
- if (req_op(rq) == REQ_OP_ZONE_APPEND)
- return false;
-
- if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
- return false;
- if (rq->rq_flags & RQF_NOMERGE_FLAGS)
- return false;
-
- return true;
-}
-
-static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
-{
- if (bio_page(a) == bio_page(b) &&
- bio_offset(a) == bio_offset(b))
- return true;
-
- return false;
-}
-
static inline unsigned int blk_queue_depth(struct request_queue *q)
{
if (q->queue_depth)
@@ -808,83 +584,20 @@ static inline unsigned int blk_queue_depth(struct request_queue *q)
#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
#define BLK_MIN_SG_TIMEOUT (7 * HZ)
-struct rq_map_data {
- struct page **pages;
- int page_order;
- int nr_entries;
- unsigned long offset;
- int null_mapped;
- int from_user;
-};
-
-struct req_iterator {
- struct bvec_iter iter;
- struct bio *bio;
-};
-
/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio) \
for (; _bio; _bio = _bio->bi_next)
-#define __rq_for_each_bio(_bio, rq) \
- if ((rq->bio)) \
- for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
-#define rq_for_each_segment(bvl, _rq, _iter) \
- __rq_for_each_bio(_iter.bio, _rq) \
- bio_for_each_segment(bvl, _iter.bio, _iter.iter)
-
-#define rq_for_each_bvec(bvl, _rq, _iter) \
- __rq_for_each_bio(_iter.bio, _rq) \
- bio_for_each_bvec(bvl, _iter.bio, _iter.iter)
-
-#define rq_iter_last(bvec, _iter) \
- (_iter.bio->bi_next == NULL && \
- bio_iter_last(bvec, _iter.iter))
-
-#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
-#endif
-#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-extern void rq_flush_dcache_pages(struct request *rq);
-#else
-static inline void rq_flush_dcache_pages(struct request *rq)
-{
-}
-#endif
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
-blk_qc_t submit_bio_noacct(struct bio *bio);
-extern void blk_rq_init(struct request_queue *q, struct request *rq);
-extern void blk_put_request(struct request *);
-extern struct request *blk_get_request(struct request_queue *, unsigned int op,
- blk_mq_req_flags_t flags);
+void submit_bio_noacct(struct bio *bio);
+
extern int blk_lld_busy(struct request_queue *q);
-extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
- struct bio_set *bs, gfp_t gfp_mask,
- int (*bio_ctr)(struct bio *, struct bio *, void *),
- void *data);
-extern void blk_rq_unprep_clone(struct request *rq);
-extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
- struct request *rq);
-int blk_rq_append_bio(struct request *rq, struct bio *bio);
extern void blk_queue_split(struct bio **);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
-extern int blk_rq_map_user(struct request_queue *, struct request *,
- struct rq_map_data *, void __user *, unsigned long,
- gfp_t);
-extern int blk_rq_unmap_user(struct bio *);
-extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
-extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, const struct iov_iter *,
- gfp_t);
-extern void blk_execute_rq_nowait(struct gendisk *,
- struct request *, int, rq_end_io_fn *);
-
-blk_status_t blk_execute_rq(struct gendisk *bd_disk, struct request *rq,
- int at_head);
/* Helper to convert REQ_OP_XXX to its string format XXX */
extern const char *blk_op_str(unsigned int op);
@@ -892,69 +605,17 @@ extern const char *blk_op_str(unsigned int op);
int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);
-int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
+/* poll the hardware only once, rather than until a completion is found */
+#define BLK_POLL_ONESHOT (1 << 0)
+/* do not sleep to wait for the expected completion time */
+#define BLK_POLL_NOSLEEP (1 << 1)
+int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
+int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
+ unsigned int flags);
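
A sketch of the caller side of the new polling interface (assumption: *done is set by the bio's ->bi_end_io): spin on bio_poll() until the polled bio completes. Passing a NULL io_comp_batch and BLK_POLL_NOSLEEP keeps the loop a pure busy-wait.

	/* Illustrative busy-wait on a REQ_POLLED bio. */
	static void my_wait_polled(struct bio *bio, bool *done)
	{
		while (!READ_ONCE(*done))
			bio_poll(bio, NULL, BLK_POLL_NOSLEEP);	/* spin, don't sleep */
	}
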
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
- return bdev->bd_disk->queue; /* this is never NULL */
-}
-
-/*
- * The basic unit of block I/O is a sector. It is used in a number of contexts
- * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
- * bytes. Variables of type sector_t represent an offset or size that is a
- * multiple of 512 bytes. Hence these two constants.
- */
-#ifndef SECTOR_SHIFT
-#define SECTOR_SHIFT 9
-#endif
-#ifndef SECTOR_SIZE
-#define SECTOR_SIZE (1 << SECTOR_SHIFT)
-#endif
-
-#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
-#define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT)
-#define SECTOR_MASK (PAGE_SECTORS - 1)
-
-/*
- * blk_rq_pos() : the current sector
- * blk_rq_bytes() : bytes left in the entire request
- * blk_rq_cur_bytes() : bytes left in the current segment
- * blk_rq_err_bytes() : bytes left till the next error boundary
- * blk_rq_sectors() : sectors left in the entire request
- * blk_rq_cur_sectors() : sectors left in the current segment
- * blk_rq_stats_sectors() : sectors of the entire request used for stats
- */
-static inline sector_t blk_rq_pos(const struct request *rq)
-{
- return rq->__sector;
-}
-
-static inline unsigned int blk_rq_bytes(const struct request *rq)
-{
- return rq->__data_len;
-}
-
-static inline int blk_rq_cur_bytes(const struct request *rq)
-{
- return rq->bio ? bio_cur_bytes(rq->bio) : 0;
-}
-
-extern unsigned int blk_rq_err_bytes(const struct request *rq);
-
-static inline unsigned int blk_rq_sectors(const struct request *rq)
-{
- return blk_rq_bytes(rq) >> SECTOR_SHIFT;
-}
-
-static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
-{
- return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
-}
-
-static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
-{
- return rq->stats_sectors;
+ return bdev->bd_queue; /* this is never NULL */
}
#ifdef CONFIG_BLK_DEV_ZONED
@@ -973,42 +634,8 @@ static inline unsigned int bio_zone_is_seq(struct bio *bio)
return blk_queue_zone_is_seq(bdev_get_queue(bio->bi_bdev),
bio->bi_iter.bi_sector);
}
-
-static inline unsigned int blk_rq_zone_no(struct request *rq)
-{
- return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
-}
-
-static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
-{
- return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
-}
#endif /* CONFIG_BLK_DEV_ZONED */
-/*
- * Some commands like WRITE SAME have a payload or data transfer size which
- * is different from the size of the request. Any driver that supports such
- * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
- * calculate the data transfer size.
- */
-static inline unsigned int blk_rq_payload_bytes(struct request *rq)
-{
- if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
- return rq->special_vec.bv_len;
- return blk_rq_bytes(rq);
-}
-
-/*
- * Return the first full biovec in the request. The caller needs to check that
- * there are any bvecs before calling this helper.
- */
-static inline struct bio_vec req_bvec(struct request *rq)
-{
- if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
- return rq->special_vec;
- return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
-}
-
static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
int op)
{
@@ -1048,47 +675,6 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
return min(q->limits.max_sectors, chunk_sectors);
}
-static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
- sector_t offset)
-{
- struct request_queue *q = rq->q;
-
- if (blk_rq_is_passthrough(rq))
- return q->limits.max_hw_sectors;
-
- if (!q->limits.chunk_sectors ||
- req_op(rq) == REQ_OP_DISCARD ||
- req_op(rq) == REQ_OP_SECURE_ERASE)
- return blk_queue_get_max_sectors(q, req_op(rq));
-
- return min(blk_max_size_offset(q, offset, 0),
- blk_queue_get_max_sectors(q, req_op(rq)));
-}
-
-static inline unsigned int blk_rq_count_bios(struct request *rq)
-{
- unsigned int nr_bios = 0;
- struct bio *bio;
-
- __rq_for_each_bio(bio, rq)
- nr_bios++;
-
- return nr_bios;
-}
-
-void blk_steal_bios(struct bio_list *list, struct request *rq);
-
-/*
- * Request completion related functions.
- *
- * blk_update_request() completes given number of bytes and updates
- * the request without completing it.
- */
-extern bool blk_update_request(struct request *rq, blk_status_t error,
- unsigned int nr_bytes);
-
-extern void blk_abort_request(struct request *);
-
/*
* Access functions for manipulating queue properties
*/
@@ -1133,46 +719,24 @@ extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
-extern void blk_queue_required_elevator_features(struct request_queue *q,
- unsigned int features);
-extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
- struct device *dev);
-/*
- * Number of physical segments as sent to the device.
- *
- * Normally this is the number of discontiguous data segments sent by the
- * submitter. But for data-less command like discard we might have no
- * actual data segments submitted, but the driver might have to add it's
- * own special payload. In that case we still return 1 here so that this
- * special payload will be mapped.
- */
-static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
-{
- if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
- return 1;
- return rq->nr_phys_segments;
-}
+struct blk_independent_access_ranges *
+disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
+void disk_set_independent_access_ranges(struct gendisk *disk,
+ struct blk_independent_access_ranges *iars);
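
A hedged sketch of a dual-actuator disk driver publishing two ranges through this API; the even split of the capacity is an arbitrary assumption.

	/* Illustrative only. */
	static int my_register_ranges(struct gendisk *disk, sector_t capacity)
	{
		struct blk_independent_access_ranges *iars;

		iars = disk_alloc_independent_access_ranges(disk, 2);
		if (!iars)
			return -ENOMEM;

		iars->ia_range[0].sector = 0;
		iars->ia_range[0].nr_sectors = capacity / 2;
		iars->ia_range[1].sector = capacity / 2;
		iars->ia_range[1].nr_sectors = capacity - capacity / 2;

		disk_set_independent_access_ranges(disk, iars);
		return 0;
	}
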
/*
- * Number of discard segments (or ranges) the driver needs to fill in.
- * Each discard bio merged into a request is counted as one segment.
+ * Elevator features for blk_queue_required_elevator_features:
*/
-static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
-{
- return max_t(unsigned short, rq->nr_phys_segments, 1);
-}
-
-int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
- struct scatterlist *sglist, struct scatterlist **last_sg);
-static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
- struct scatterlist *sglist)
-{
- struct scatterlist *last_sg = NULL;
+/* Supports zoned block devices sequential write constraint */
+#define ELEVATOR_F_ZBD_SEQ_WRITE (1U << 0)
+/* Supports scheduling on multiple hardware queues */
+#define ELEVATOR_F_MQ_AWARE (1U << 1)
- return __blk_rq_map_sg(q, rq, sglist, &last_sg);
-}
-extern void blk_dump_rq_flags(struct request *, char *);
+extern void blk_queue_required_elevator_features(struct request_queue *q,
+ unsigned int features);
+extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
+ struct device *dev);
bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);
@@ -1187,19 +751,24 @@ extern void blk_set_queue_dying(struct request_queue *);
* as the lock contention for request_queue lock is reduced.
*
* It is ok not to disable preemption when adding the request to the plug list
- * or when attempting a merge, because blk_schedule_flush_list() will only flush
- * the plug list when the task sleeps by itself. For details, please see
- * schedule() where blk_schedule_flush_plug() is called.
+ * or when attempting a merge. For details, please see schedule() where
+ * blk_flush_plug() is called.
*/
struct blk_plug {
- struct list_head mq_list; /* blk-mq requests */
- struct list_head cb_list; /* md requires an unplug callback */
+ struct request *mq_list; /* blk-mq requests */
+
+ /* if ios_left is > 1, we can batch tag/rq allocations */
+ struct request *cached_rq;
+ unsigned short nr_ios;
+
unsigned short rq_count;
+
bool multiple_queues;
+ bool has_elevator;
bool nowait;
+
+ struct list_head cb_list; /* md requires an unplug callback */
};
-#define BLK_MAX_REQUEST_COUNT 16
-#define BLK_PLUG_FLUSH_SIZE (128 * 1024)
struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
@@ -1211,32 +780,17 @@ struct blk_plug_cb {
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
void *data, int size);
extern void blk_start_plug(struct blk_plug *);
+extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
extern void blk_finish_plug(struct blk_plug *);
-extern void blk_flush_plug_list(struct blk_plug *, bool);
-
-static inline void blk_flush_plug(struct task_struct *tsk)
-{
- struct blk_plug *plug = tsk->plug;
- if (plug)
- blk_flush_plug_list(plug, false);
-}
-
-static inline void blk_schedule_flush_plug(struct task_struct *tsk)
-{
- struct blk_plug *plug = tsk->plug;
-
- if (plug)
- blk_flush_plug_list(plug, true);
-}
+void blk_flush_plug(struct blk_plug *plug, bool from_schedule);
static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
struct blk_plug *plug = tsk->plug;
return plug &&
- (!list_empty(&plug->mq_list) ||
- !list_empty(&plug->cb_list));
+ (plug->mq_list || !list_empty(&plug->cb_list));
}
int blkdev_issue_flush(struct block_device *bdev);
@@ -1245,23 +799,23 @@ long nr_blockdev_pages(void);
struct blk_plug {
};
-static inline void blk_start_plug(struct blk_plug *plug)
+static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
+ unsigned short nr_ios)
{
}
-static inline void blk_finish_plug(struct blk_plug *plug)
+static inline void blk_start_plug(struct blk_plug *plug)
{
}
-static inline void blk_flush_plug(struct task_struct *task)
+static inline void blk_finish_plug(struct blk_plug *plug)
{
}
-static inline void blk_schedule_flush_plug(struct task_struct *task)
+static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
}
-
static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
return false;
@@ -1499,22 +1053,6 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
return offset << SECTOR_SHIFT;
}
-/*
- * Two cases of handling DISCARD merge:
- * If max_discard_segments > 1, the driver takes every bio
- * as a range and send them to controller together. The ranges
- * needn't to be contiguous.
- * Otherwise, the bios/requests will be handled as same as
- * others which should be contiguous.
- */
-static inline bool blk_discard_mergable(struct request *req)
-{
- if (req_op(req) == REQ_OP_DISCARD &&
- queue_max_discard_segments(req->q) > 1)
- return true;
- return false;
-}
-
static inline int bdev_discard_alignment(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
@@ -1628,210 +1166,36 @@ int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned lo
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
MODULE_ALIAS("block-major-" __stringify(major) "-*")
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-
-enum blk_integrity_flags {
- BLK_INTEGRITY_VERIFY = 1 << 0,
- BLK_INTEGRITY_GENERATE = 1 << 1,
- BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
- BLK_INTEGRITY_IP_CHECKSUM = 1 << 3,
-};
-
-struct blk_integrity_iter {
- void *prot_buf;
- void *data_buf;
- sector_t seed;
- unsigned int data_size;
- unsigned short interval;
- const char *disk_name;
-};
-
-typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
-typedef void (integrity_prepare_fn) (struct request *);
-typedef void (integrity_complete_fn) (struct request *, unsigned int);
-
-struct blk_integrity_profile {
- integrity_processing_fn *generate_fn;
- integrity_processing_fn *verify_fn;
- integrity_prepare_fn *prepare_fn;
- integrity_complete_fn *complete_fn;
- const char *name;
-};
-
-extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
-extern void blk_integrity_unregister(struct gendisk *);
-extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
-extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
- struct scatterlist *);
-extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
-
-static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
-{
- struct blk_integrity *bi = &disk->queue->integrity;
-
- if (!bi->profile)
- return NULL;
-
- return bi;
-}
-
-static inline
-struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
-{
- return blk_get_integrity(bdev->bd_disk);
-}
-
-static inline bool
-blk_integrity_queue_supports_integrity(struct request_queue *q)
-{
- return q->integrity.profile;
-}
-
-static inline bool blk_integrity_rq(struct request *rq)
-{
- return rq->cmd_flags & REQ_INTEGRITY;
-}
-
-static inline void blk_queue_max_integrity_segments(struct request_queue *q,
- unsigned int segs)
-{
- q->limits.max_integrity_segments = segs;
-}
-
-static inline unsigned short
-queue_max_integrity_segments(const struct request_queue *q)
-{
- return q->limits.max_integrity_segments;
-}
-
-/**
- * bio_integrity_intervals - Return number of integrity intervals for a bio
- * @bi: blk_integrity profile for device
- * @sectors: Size of the bio in 512-byte sectors
- *
- * Description: The block layer calculates everything in 512 byte
- * sectors but integrity metadata is done in terms of the data integrity
- * interval size of the storage device. Convert the block layer sectors
- * to the appropriate number of integrity intervals.
- */
-static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
- unsigned int sectors)
-{
- return sectors >> (bi->interval_exp - 9);
-}
-
-static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
- unsigned int sectors)
-{
- return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
-}
-
-/*
- * Return the first bvec that contains integrity data. Only drivers that are
- * limited to a single integrity segment should use this helper.
- */
-static inline struct bio_vec *rq_integrity_vec(struct request *rq)
-{
- if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1))
- return NULL;
- return rq->bio->bi_integrity->bip_vec;
-}
-
-#else /* CONFIG_BLK_DEV_INTEGRITY */
-
-struct bio;
-struct block_device;
-struct gendisk;
-struct blk_integrity;
-
-static inline int blk_integrity_rq(struct request *rq)
-{
- return 0;
-}
-static inline int blk_rq_count_integrity_sg(struct request_queue *q,
- struct bio *b)
-{
- return 0;
-}
-static inline int blk_rq_map_integrity_sg(struct request_queue *q,
- struct bio *b,
- struct scatterlist *s)
-{
- return 0;
-}
-static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
-{
- return NULL;
-}
-static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
-{
- return NULL;
-}
-static inline bool
-blk_integrity_queue_supports_integrity(struct request_queue *q)
-{
- return false;
-}
-static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
-{
- return 0;
-}
-static inline void blk_integrity_register(struct gendisk *d,
- struct blk_integrity *b)
-{
-}
-static inline void blk_integrity_unregister(struct gendisk *d)
-{
-}
-static inline void blk_queue_max_integrity_segments(struct request_queue *q,
- unsigned int segs)
-{
-}
-static inline unsigned short queue_max_integrity_segments(const struct request_queue *q)
-{
- return 0;
-}
-
-static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
- unsigned int sectors)
-{
- return 0;
-}
-
-static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
- unsigned int sectors)
-{
- return 0;
-}
-
-static inline struct bio_vec *rq_integrity_vec(struct request *rq)
-{
- return NULL;
-}
-
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
-
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
-bool blk_ksm_register(struct blk_keyslot_manager *ksm, struct request_queue *q);
+bool blk_crypto_register(struct blk_crypto_profile *profile,
+ struct request_queue *q);
-void blk_ksm_unregister(struct request_queue *q);
+void blk_crypto_unregister(struct request_queue *q);
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
-static inline bool blk_ksm_register(struct blk_keyslot_manager *ksm,
- struct request_queue *q)
+static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
+ struct request_queue *q)
{
return true;
}
-static inline void blk_ksm_unregister(struct request_queue *q) { }
+static inline void blk_crypto_unregister(struct request_queue *q) { }
#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
+enum blk_unique_id {
+ /* these match the Designator Types specified in SPC */
+ BLK_UID_T10 = 1,
+ BLK_UID_EUI64 = 2,
+ BLK_UID_NAA = 3,
+};
+
+#define NFL4_UFLG_MASK 0x0000003F
struct block_device_operations {
- blk_qc_t (*submit_bio) (struct bio *bio);
+ void (*submit_bio)(struct bio *bio);
int (*open) (struct block_device *, fmode_t);
void (*release) (struct gendisk *, fmode_t);
int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
@@ -1847,6 +1211,9 @@ struct block_device_operations {
int (*report_zones)(struct gendisk *, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
char *(*devnode)(struct gendisk *disk, umode_t *mode);
+ /* returns the length of the identifier or a negative errno: */
+ int (*get_unique_id)(struct gendisk *disk, u8 id[16],
+ enum blk_unique_id id_type);
struct module *owner;
const struct pr_ops *pr_ops;
@@ -1869,60 +1236,6 @@ extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
struct writeback_control *);
-#ifdef CONFIG_BLK_DEV_ZONED
-bool blk_req_needs_zone_write_lock(struct request *rq);
-bool blk_req_zone_write_trylock(struct request *rq);
-void __blk_req_zone_write_lock(struct request *rq);
-void __blk_req_zone_write_unlock(struct request *rq);
-
-static inline void blk_req_zone_write_lock(struct request *rq)
-{
- if (blk_req_needs_zone_write_lock(rq))
- __blk_req_zone_write_lock(rq);
-}
-
-static inline void blk_req_zone_write_unlock(struct request *rq)
-{
- if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
- __blk_req_zone_write_unlock(rq);
-}
-
-static inline bool blk_req_zone_is_write_locked(struct request *rq)
-{
- return rq->q->seq_zones_wlock &&
- test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock);
-}
-
-static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
-{
- if (!blk_req_needs_zone_write_lock(rq))
- return true;
- return !blk_req_zone_is_write_locked(rq);
-}
-#else
-static inline bool blk_req_needs_zone_write_lock(struct request *rq)
-{
- return false;
-}
-
-static inline void blk_req_zone_write_lock(struct request *rq)
-{
-}
-
-static inline void blk_req_zone_write_unlock(struct request *rq)
-{
-}
-static inline bool blk_req_zone_is_write_locked(struct request *rq)
-{
- return false;
-}
-
-static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
-{
- return true;
-}
-#endif /* CONFIG_BLK_DEV_ZONED */
-
static inline void blk_wake_io_task(struct task_struct *waiter)
{
/*
@@ -1991,6 +1304,8 @@ int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
#ifdef CONFIG_BLOCK
void invalidate_bdev(struct block_device *bdev);
int sync_blockdev(struct block_device *bdev);
+int sync_blockdev_nowait(struct block_device *bdev);
+void sync_bdevs(bool wait);
#else
static inline void invalidate_bdev(struct block_device *bdev)
{
@@ -1999,10 +1314,54 @@ static inline int sync_blockdev(struct block_device *bdev)
{
return 0;
}
+static inline int sync_blockdev_nowait(struct block_device *bdev)
+{
+ return 0;
+}
+static inline void sync_bdevs(bool wait)
+{
+}
#endif
int fsync_bdev(struct block_device *bdev);
int freeze_bdev(struct block_device *bdev);
int thaw_bdev(struct block_device *bdev);
+struct io_comp_batch {
+ struct request *req_list;
+ bool need_ts;
+ void (*complete)(struct io_comp_batch *);
+};
+
+#define DEFINE_IO_COMP_BATCH(name) struct io_comp_batch name = { }
+
+#define rq_list_add(listptr, rq) do { \
+ (rq)->rq_next = *(listptr); \
+ *(listptr) = rq; \
+} while (0)
+
+#define rq_list_pop(listptr) \
+({ \
+ struct request *__req = NULL; \
+ if ((listptr) && *(listptr)) { \
+ __req = *(listptr); \
+ *(listptr) = __req->rq_next; \
+ } \
+ __req; \
+})
+
+#define rq_list_peek(listptr) \
+({ \
+ struct request *__req = NULL; \
+ if ((listptr) && *(listptr)) \
+ __req = *(listptr); \
+ __req; \
+})
+
+#define rq_list_for_each(listptr, pos) \
+	for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos))
+
+#define rq_list_next(rq) (rq)->rq_next
+#define rq_list_empty(list) ((list) == (struct request *) NULL)
+
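
For illustration (not part of the patch), draining one of these singly linked request lists:

	/* Illustrative: complete every request queued on an rq_list. */
	static void my_drain(struct request **listptr)
	{
		struct request *rq;

		while ((rq = rq_list_pop(listptr)))
			blk_mq_end_request(rq, BLK_STS_OK);
	}
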
#endif /* _LINUX_BLKDEV_H */
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index a083e15df608..22501a293fa5 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -2,7 +2,7 @@
#ifndef BLKTRACE_H
#define BLKTRACE_H
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/relay.h>
#include <linux/compat.h>
#include <uapi/linux/blktrace_api.h>
diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
index 537e1b991f11..a4665c7ab07c 100644
--- a/include/linux/bootconfig.h
+++ b/include/linux/bootconfig.h
@@ -7,8 +7,18 @@
* Author: Masami Hiramatsu <mhiramat@kernel.org>
*/
+#ifdef __KERNEL__
#include <linux/kernel.h>
#include <linux/types.h>
+#else /* !__KERNEL__ */
+/*
+ * NOTE: This is only for tools/bootconfig, because tools/bootconfig will
+ * run the parser sanity test.
+ * This does NOT mean linux/bootconfig.h is available in user space.
+ * However, if you change this file, please make sure tools/bootconfig
+ * still builds and runs without issue.
+ */
+#endif
#define BOOTCONFIG_MAGIC "#BOOTCONFIG\n"
#define BOOTCONFIG_MAGIC_LEN 12
@@ -25,10 +35,10 @@
* The checksum will be used with the BOOTCONFIG_MAGIC and the size for
* embedding the bootconfig in the initrd image.
*/
-static inline __init u32 xbc_calc_checksum(void *data, u32 size)
+static inline __init uint32_t xbc_calc_checksum(void *data, uint32_t size)
{
unsigned char *p = data;
- u32 ret = 0;
+ uint32_t ret = 0;
while (size--)
ret += *p++;
@@ -38,10 +48,10 @@ static inline __init u32 xbc_calc_checksum(void *data, u32 size)
/* XBC tree node */
struct xbc_node {
- u16 next;
- u16 child;
- u16 parent;
- u16 data;
+ uint16_t next;
+ uint16_t child;
+ uint16_t parent;
+ uint16_t data;
} __attribute__ ((__packed__));
#define XBC_KEY 0
@@ -271,13 +281,12 @@ static inline int __init xbc_node_compose_key(struct xbc_node *node,
}
/* XBC node initializer */
-int __init xbc_init(char *buf, const char **emsg, int *epos);
+int __init xbc_init(const char *buf, size_t size, const char **emsg, int *epos);
+/* XBC node and size information */
+int __init xbc_get_info(int *node_size, size_t *data_size);
/* XBC cleanup data structures */
-void __init xbc_destroy_all(void);
-
-/* Debug dump functions */
-void __init xbc_debug_dump(void);
+void __init xbc_exit(void);
#endif
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index eed86eb0a1de..fc53e0ad56d9 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_BH_H
#define _LINUX_BH_H
+#include <linux/instruction_pointer.h>
#include <linux/preempt.h>
#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_TRACE_IRQFLAGS)
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 2746fd804216..11820a430d6c 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -157,26 +157,6 @@ struct cgroup_bpf {
int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);
-int __cgroup_bpf_attach(struct cgroup *cgrp,
- struct bpf_prog *prog, struct bpf_prog *replace_prog,
- struct bpf_cgroup_link *link,
- enum bpf_attach_type type, u32 flags);
-int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
- struct bpf_cgroup_link *link,
- enum bpf_attach_type type);
-int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
- union bpf_attr __user *uattr);
-
-/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
-int cgroup_bpf_attach(struct cgroup *cgrp,
- struct bpf_prog *prog, struct bpf_prog *replace_prog,
- struct bpf_cgroup_link *link, enum bpf_attach_type type,
- u32 flags);
-int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
- enum bpf_attach_type type);
-int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
- union bpf_attr __user *uattr);
-
int __cgroup_bpf_run_filter_skb(struct sock *sk,
struct sk_buff *skb,
enum cgroup_bpf_attach_type atype);
@@ -517,6 +497,7 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
#define cgroup_bpf_enabled(atype) (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) ({ 0; })
+#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 020a7d5bf470..f715e8863f4d 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -48,6 +48,7 @@ extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;
+typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
@@ -142,7 +143,8 @@ struct bpf_map_ops {
int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
struct bpf_func_state *caller,
struct bpf_func_state *callee);
- int (*map_for_each_callback)(struct bpf_map *map, void *callback_fn,
+ int (*map_for_each_callback)(struct bpf_map *map,
+ bpf_callback_t callback_fn,
void *callback_ctx, u64 flags);
/* BTF name and id of struct allocated by map_alloc */
@@ -166,6 +168,7 @@ struct bpf_map {
u32 key_size;
u32 value_size;
u32 max_entries;
+ u64 map_extra; /* any per-map-type extra fields */
u32 map_flags;
int spin_lock_off; /* >=0 valid offset, <0 error */
int timer_off; /* >=0 valid offset, <0 error */
@@ -173,15 +176,15 @@ struct bpf_map {
int numa_node;
u32 btf_key_type_id;
u32 btf_value_type_id;
+ u32 btf_vmlinux_value_type_id;
struct btf *btf;
#ifdef CONFIG_MEMCG_KMEM
struct mem_cgroup *memcg;
#endif
char name[BPF_OBJ_NAME_LEN];
- u32 btf_vmlinux_value_type_id;
bool bypass_spec_v1;
bool frozen; /* write-once; write-protected by freeze_mutex */
- /* 22 bytes hole */
+ /* 14 bytes hole */
/* The 3rd and 4th cacheline with misc members to avoid false sharing
* particularly with refcounting.
@@ -481,6 +484,12 @@ bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
aux->ctx_field_size = size;
}
+static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
+ insn->src_reg == BPF_PSEUDO_FUNC;
+}
+
struct bpf_prog_ops {
int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);
@@ -511,7 +520,7 @@ struct bpf_verifier_ops {
const struct btf_type *t, int off, int size,
enum bpf_access_type atype,
u32 *next_btf_id);
- bool (*check_kfunc_call)(u32 kfunc_btf_id);
+ bool (*check_kfunc_call)(u32 kfunc_btf_id, struct module *owner);
};
struct bpf_prog_offload_ops {
@@ -875,6 +884,7 @@ struct bpf_prog_aux {
void *jit_data; /* JIT specific data. arch dependent */
struct bpf_jit_poke_descriptor *poke_tab;
struct bpf_kfunc_desc_tab *kfunc_tab;
+ struct bpf_kfunc_btf_tab *kfunc_btf_tab;
u32 size_poke_tab;
struct bpf_ksym ksym;
const struct bpf_prog_ops *ops;
@@ -884,6 +894,7 @@ struct bpf_prog_aux {
struct bpf_prog *prog;
struct user_struct *user;
u64 load_time; /* ns since boottime */
+ u32 verified_insns;
struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
@@ -929,8 +940,11 @@ struct bpf_array_aux {
* stored in the map to make sure that all callers and callees have
* the same prog type and JITed flag.
*/
- enum bpf_prog_type type;
- bool jited;
+ struct {
+ spinlock_t lock;
+ enum bpf_prog_type type;
+ bool jited;
+ } owner;
/* Programs with direct jumps into programs part of this array. */
struct list_head poke_progs;
struct bpf_map *map;
@@ -995,6 +1009,10 @@ bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
void *value);
+int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_progs *tprogs,
+ struct bpf_prog *prog,
+ const struct btf_func_model *model,
+ void *image, void *image_end);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
if (owner == BPF_MODULE_OWNER)
@@ -1009,6 +1027,22 @@ static inline void bpf_module_put(const void *data, struct module *owner)
else
module_put(owner);
}
+
+#ifdef CONFIG_NET
+/* Define it here to avoid the need for a forward declaration */
+struct bpf_dummy_ops_state {
+ int val;
+};
+
+struct bpf_dummy_ops {
+ int (*test_1)(struct bpf_dummy_ops_state *cb);
+ int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
+ char a3, unsigned long a4);
+};
+
+int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+#endif
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
@@ -1089,6 +1123,7 @@ bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *f
int bpf_prog_calc_tag(struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
+const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);
typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
unsigned long off, unsigned long len);
@@ -1636,10 +1671,33 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr __user *uattr);
-bool bpf_prog_test_check_kfunc_call(u32 kfunc_id);
+bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, struct module *owner);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info);
+
+static inline bool bpf_tracing_ctx_access(int off, int size,
+ enum bpf_access_type type)
+{
+ if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
+ return false;
+ if (type != BPF_READ)
+ return false;
+ if (off % size != 0)
+ return false;
+ return true;
+}
+
+static inline bool bpf_tracing_btf_ctx_access(int off, int size,
+ enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ if (!bpf_tracing_ctx_access(off, size, type))
+ return false;
+ return btf_ctx_access(off, size, type, prog, info);
+}
+
int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
const struct btf_type *t, int off, int size,
enum bpf_access_type atype,
@@ -1857,7 +1915,8 @@ static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
return -ENOTSUPP;
}
-static inline bool bpf_prog_test_check_kfunc_call(u32 kfunc_id)
+static inline bool bpf_prog_test_check_kfunc_call(u32 kfunc_id,
+ struct module *owner)
{
return false;
}
@@ -2088,6 +2147,7 @@ extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto;
extern const struct bpf_func_proto bpf_copy_from_user_proto;
extern const struct bpf_func_proto bpf_snprintf_btf_proto;
extern const struct bpf_func_proto bpf_snprintf_proto;
@@ -2102,6 +2162,7 @@ extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
+extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto;
const struct bpf_func_proto *tracing_prog_func_proto(
enum bpf_func_id func_id, const struct bpf_prog *prog);
@@ -2217,6 +2278,8 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
+#define MAX_BPRINTF_VARARGS 12
+
int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
u32 **bin_buf, u32 num_args);
void bpf_bprintf_cleanup(void);
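
The two bpf_tracing_*ctx_access() helpers added above factor out the context
bounds, BPF_READ, and alignment checks that tracing-style program types would
otherwise duplicate in their is_valid_access callbacks. A minimal sketch of
such a callback (the callback name is hypothetical, not part of this patch):

	static bool my_tracing_is_valid_access(int off, int size,
					       enum bpf_access_type type,
					       const struct bpf_prog *prog,
					       struct bpf_insn_access_aux *info)
	{
		/* Bounds/read-only/alignment checks, then the BTF-based check */
		return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
	}
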
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index 9c81724e4b98..48a91c51c015 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -101,14 +101,14 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_trace_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
-#ifdef CONFIG_NET
-BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
-BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, dev_map_hash_ops)
-BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops)
#ifdef CONFIG_BPF_LSM
BPF_MAP_TYPE(BPF_MAP_TYPE_INODE_STORAGE, inode_storage_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_TASK_STORAGE, task_storage_map_ops)
+#ifdef CONFIG_NET
+BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, dev_map_hash_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
#if defined(CONFIG_XDP_SOCKETS)
BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops)
@@ -125,6 +125,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_STACK, stack_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_STRUCT_OPS, bpf_struct_ops_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_RINGBUF, ringbuf_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_BLOOM_FILTER, bloom_filter_map_ops)
BPF_LINK_TYPE(BPF_LINK_TYPE_RAW_TRACEPOINT, raw_tracepoint)
BPF_LINK_TYPE(BPF_LINK_TYPE_TRACING, tracing)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 5424124dbe36..c8a78e830fca 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -527,5 +527,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
const struct bpf_prog *tgt_prog,
u32 btf_id,
struct bpf_attach_target_info *tgt_info);
+void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);
+
#endif /* _LINUX_BPF_VERIFIER_H */
diff --git a/include/linux/bpfptr.h b/include/linux/bpfptr.h
index 546e27fc6d46..46e1757d06a3 100644
--- a/include/linux/bpfptr.h
+++ b/include/linux/bpfptr.h
@@ -3,6 +3,7 @@
#ifndef _LINUX_BPFPTR_H
#define _LINUX_BPFPTR_H
+#include <linux/mm.h>
#include <linux/sockptr.h>
typedef sockptr_t bpfptr_t;
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index c2c2147dfeb8..747fad264033 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -32,6 +32,7 @@
#define PHY_ID_BCM72113 0x35905310
#define PHY_ID_BCM72116 0x35905350
+#define PHY_ID_BCM72165 0x35905340
#define PHY_ID_BCM7250 0xae025280
#define PHY_ID_BCM7255 0xae025120
#define PHY_ID_BCM7260 0xae025190
@@ -49,6 +50,7 @@
#define PHY_ID_BCM7439 0x600d8480
#define PHY_ID_BCM7439_2 0xae025080
#define PHY_ID_BCM7445 0x600d8510
+#define PHY_ID_BCM7712 0x35905330
#define PHY_ID_BCM_CYGNUS 0xae025200
#define PHY_ID_BCM_OMEGA 0xae025100
@@ -66,6 +68,7 @@
#define PHY_BRCM_CLEAR_RGMII_MODE 0x00000004
#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00000008
#define PHY_BRCM_EN_MASTER_MODE 0x00000010
+#define PHY_BRCM_IDDQ_SUSPEND 0x00000020
/* Broadcom BCM7xxx specific workarounds */
#define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff)
@@ -83,6 +86,7 @@
#define MII_BCM54XX_EXP_DATA 0x15 /* Expansion register data */
#define MII_BCM54XX_EXP_SEL 0x17 /* Expansion register select */
+#define MII_BCM54XX_EXP_SEL_TOP 0x0d00 /* TOP_MISC expansion register select */
#define MII_BCM54XX_EXP_SEL_SSD 0x0e00 /* Secondary SerDes select */
#define MII_BCM54XX_EXP_SEL_ER 0x0f00 /* Expansion register select */
#define MII_BCM54XX_EXP_SEL_ETC 0x0d00 /* Expansion register spare + 2k mem */
@@ -233,6 +237,7 @@
#define MII_BCM54XX_EXP_EXP08 0x0F08
#define MII_BCM54XX_EXP_EXP08_RJCT_2MHZ 0x0001
#define MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE 0x0200
+#define MII_BCM54XX_EXP_EXP08_FORCE_DAC_WAKE 0x0100
#define MII_BCM54XX_EXP_EXP75 0x0f75
#define MII_BCM54XX_EXP_EXP75_VDACCTRL 0x003c
#define MII_BCM54XX_EXP_EXP75_CM_OSC 0x0001
@@ -241,6 +246,12 @@
#define MII_BCM54XX_EXP_EXP97 0x0f97
#define MII_BCM54XX_EXP_EXP97_MYST 0x0c0c
+/* Top-MISC expansion registers */
+#define BCM54XX_TOP_MISC_IDDQ_CTRL (MII_BCM54XX_EXP_SEL_TOP + 0x06)
+#define BCM54XX_TOP_MISC_IDDQ_LP (1 << 0)
+#define BCM54XX_TOP_MISC_IDDQ_SD (1 << 2)
+#define BCM54XX_TOP_MISC_IDDQ_SR (1 << 3)
+
/*
* BCM5482: Secondary SerDes registers
*/
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 214fde93214b..203eef993d76 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -5,6 +5,7 @@
#define _LINUX_BTF_H 1
#include <linux/types.h>
+#include <linux/bpfptr.h>
#include <uapi/linux/btf.h>
#include <uapi/linux/bpf.h>
@@ -238,4 +239,42 @@ static inline const char *btf_name_by_offset(const struct btf *btf,
}
#endif
+struct kfunc_btf_id_set {
+ struct list_head list;
+ struct btf_id_set *set;
+ struct module *owner;
+};
+
+struct kfunc_btf_id_list;
+
+#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
+void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
+ struct kfunc_btf_id_set *s);
+void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
+ struct kfunc_btf_id_set *s);
+bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id,
+ struct module *owner);
+#else
+static inline void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
+ struct kfunc_btf_id_set *s)
+{
+}
+static inline void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
+ struct kfunc_btf_id_set *s)
+{
+}
+static inline bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist,
+ u32 kfunc_id, struct module *owner)
+{
+ return false;
+}
+#endif
+
+#define DEFINE_KFUNC_BTF_ID_SET(set, name) \
+ struct kfunc_btf_id_set name = { LIST_HEAD_INIT(name.list), (set), \
+ THIS_MODULE }
+
+extern struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list;
+extern struct kfunc_btf_id_list prog_test_kfunc_list;
+
#endif
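
A rough sketch of how a module might use the kfunc BTF ID set API above,
assuming the BTF_SET_START/BTF_ID/BTF_SET_END helpers from
<linux/btf_ids.h>; the module and function names are hypothetical:

	#include <linux/btf.h>
	#include <linux/btf_ids.h>
	#include <linux/module.h>

	BTF_SET_START(my_mod_kfunc_ids)
	BTF_ID(func, my_mod_kfunc)
	BTF_SET_END(my_mod_kfunc_ids)

	static DEFINE_KFUNC_BTF_ID_SET(&my_mod_kfunc_ids, my_mod_kfunc_btf_set);

	static int __init my_mod_init(void)
	{
		/* Allow test programs to call this module's kfuncs */
		register_kfunc_btf_id_set(&prog_test_kfunc_list,
					  &my_mod_kfunc_btf_set);
		return 0;
	}

	static void __exit my_mod_exit(void)
	{
		unregister_kfunc_btf_id_set(&prog_test_kfunc_list,
					    &my_mod_kfunc_btf_set);
	}

	module_init(my_mod_init);
	module_exit(my_mod_exit);
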
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index 0e9bdd42dafb..35c25dff651a 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -44,7 +44,7 @@ struct bvec_iter {
unsigned int bi_bvec_done; /* number of bytes completed in
current bvec */
-};
+} __packed;
struct bvec_iter_all {
struct bio_vec bv;
diff --git a/include/linux/can/bittiming.h b/include/linux/can/bittiming.h
index 9de6e9053e34..20b50baf3a02 100644
--- a/include/linux/can/bittiming.h
+++ b/include/linux/can/bittiming.h
@@ -19,6 +19,9 @@
/* Megahertz */
#define CAN_MHZ 1000000UL
+#define CAN_CTRLMODE_TDC_MASK \
+ (CAN_CTRLMODE_TDC_AUTO | CAN_CTRLMODE_TDC_MANUAL)
+
/*
* struct can_tdc - CAN FD Transmission Delay Compensation parameters
*
@@ -28,34 +31,54 @@
*
* To solve this issue, ISO 11898-1 introduces in section 11.3.3
* "Transmitter delay compensation" a SSP (Secondary Sample Point)
- * equal to the distance, in time quanta, from the start of the bit
- * time on the TX pin to the actual measurement on the RX pin.
+ * equal to the distance from the start of the bit time on the TX pin
+ * to the actual measurement on the RX pin.
*
* This structure contains the parameters to calculate that SSP.
*
- * @tdcv: Transmitter Delay Compensation Value. Distance, in time
- * quanta, from when the bit is sent on the TX pin to when it is
- * received on the RX pin of the transmitter. Possible options:
+ * -+----------- one bit ----------+-- TX pin
+ * |<--- Sample Point --->|
*
- * 0: automatic mode. The controller dynamically measures @tdcv
- * for each transmitted CAN FD frame.
+ * --+----------- one bit ----------+-- RX pin
+ * |<-------- TDCV -------->|
+ * |<------- TDCO ------->|
+ * |<----------- Secondary Sample Point ---------->|
*
- * Other values: manual mode. Use the fixed provided value.
+ * To increase precision, contrary to the other bittiming parameters
+ * which are measured in time quanta, the TDC parameters are measured
+ * in clock periods (also referred to as "minimum time quantum" in
+ * ISO 11898-1).
*
- * @tdco: Transmitter Delay Compensation Offset. Offset value, in time
- * quanta, defining the distance between the start of the bit
- * reception on the RX pin of the transceiver and the SSP
- * position such that SSP = @tdcv + @tdco.
+ * @tdcv: Transmitter Delay Compensation Value. The time needed for
+ * the signal to propagate, i.e. the distance, in clock periods,
+ * from the start of the bit on the TX pin to when it is received
+ * on the RX pin. @tdcv depends on the controller modes:
+ *
+ * CAN_CTRLMODE_TDC_AUTO is set: The transceiver dynamically
+ * measures @tdcv for each transmitted CAN FD frame and the
+ * value provided here should be ignored.
+ *
+ * CAN_CTRLMODE_TDC_MANUAL is set: use the fixed provided @tdcv
+ * value.
*
- * If @tdco is zero, then TDC is disabled and both @tdcv and
- * @tdcf should be ignored.
+ * N.B. CAN_CTRLMODE_TDC_AUTO and CAN_CTRLMODE_TDC_MANUAL are
+ * mutually exclusive. Only one can be set at a time. If both
+ * CAN_CTRLMODE_TDC_AUTO and CAN_CTRLMODE_TDC_MANUAL are unset,
+ * TDC is disabled and all the values of this structure should be
+ * ignored.
+ *
+ * @tdco: Transmitter Delay Compensation Offset. Offset value, in
+ * clock periods, defining the distance between the start of the
+ * bit reception on the RX pin of the transceiver and the SSP
+ * position such that SSP = @tdcv + @tdco.
*
* @tdcf: Transmitter Delay Compensation Filter window. Defines the
- * minimum value for the SSP position in time quanta. If SSP is
- * less than @tdcf, then no delay compensations occur and the
- * normal sampling point is used instead. The feature is enabled
- * if and only if @tdcv is set to zero (automatic mode) and @tdcf
- * is configured to a value greater than @tdco.
+ * minimum value for the SSP position in clock periods. If the
+ * SSP position is less than @tdcf, then no delay compensation
+ * occurs and the normal sampling point is used instead. The
+ * feature is enabled if and only if @tdcv is set to zero
+ * (automatic mode) and @tdcf is configured to a value greater
+ * than @tdco.
*/
struct can_tdc {
u32 tdcv;
@@ -67,19 +90,32 @@ struct can_tdc {
* struct can_tdc_const - CAN hardware-dependent constant for
* Transmission Delay Compensation
*
- * @tdcv_max: Transmitter Delay Compensation Value maximum value.
- * Should be set to zero if the controller does not support
- * manual mode for tdcv.
+ * @tdcv_min: Transmitter Delay Compensation Value minimum value. If
+ * the controller does not support manual mode for tdcv
+ * (cf. flag CAN_CTRLMODE_TDC_MANUAL) then this value is
+ * ignored.
+ * @tdcv_max: Transmitter Delay Compensation Value maximum value. If
+ * the controller does not support manual mode for tdcv
+ * (cf. flag CAN_CTRLMODE_TDC_MANUAL) then this value is
+ * ignored.
+ *
+ * @tdco_min: Transmitter Delay Compensation Offset minimum value.
* @tdco_max: Transmitter Delay Compensation Offset maximum value.
* Should not be zero. If the controller does not support TDC,
* then the pointer to this structure should be NULL.
+ *
+ * @tdcf_min: Transmitter Delay Compensation Filter window minimum
+ * value. If @tdcf_max is zero, this value is ignored.
* @tdcf_max: Transmitter Delay Compensation Filter window maximum
* value. Should be set to zero if the controller does not
* support this feature.
*/
struct can_tdc_const {
+ u32 tdcv_min;
u32 tdcv_max;
+ u32 tdco_min;
u32 tdco_max;
+ u32 tdcf_min;
u32 tdcf_max;
};
@@ -87,7 +123,9 @@ struct can_tdc_const {
int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc);
-void can_calc_tdco(struct net_device *dev);
+void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
+ const struct can_bittiming *dbt,
+ u32 *ctrlmode, u32 ctrlmode_supported);
#else /* !CONFIG_CAN_CALC_BITTIMING */
static inline int
can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
@@ -97,7 +135,10 @@ can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
return -EINVAL;
}
-static inline void can_calc_tdco(struct net_device *dev)
+static inline void
+can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
+ const struct can_bittiming *dbt,
+ u32 *ctrlmode, u32 ctrlmode_supported)
{
}
#endif /* CONFIG_CAN_CALC_BITTIMING */
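
Taken together, the diagram and the parameter descriptions above reduce to a
simple relation for the Secondary Sample Point. A sketch under those
definitions (the helper is hypothetical, not part of this patch):

	/* SSP position, in clock periods, for a manually configured TDC */
	static u32 my_can_tdc_ssp(const struct can_tdc *tdc)
	{
		return tdc->tdcv + tdc->tdco;	/* SSP = TDCV + TDCO */
	}
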
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 2413253e54c7..45f19d9db5ca 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -82,6 +82,7 @@ struct can_priv {
enum can_state *state);
int (*do_get_berr_counter)(const struct net_device *dev,
struct can_berr_counter *bec);
+ int (*do_get_auto_tdcv)(const struct net_device *dev, u32 *tdcv);
unsigned int echo_skb_max;
struct sk_buff **echo_skb;
@@ -96,6 +97,39 @@ struct can_priv {
#endif
};
+static inline bool can_tdc_is_enabled(const struct can_priv *priv)
+{
+ return !!(priv->ctrlmode & CAN_CTRLMODE_TDC_MASK);
+}
+
+/*
+ * can_get_relative_tdco() - TDCO relative to the sample point
+ *
+ * struct can_tdc::tdco represents the absolute offset from TDCV. Some
+ * controllers instead use an offset relative to the Sample Point (SP)
+ * such that:
+ *
+ * SSP = TDCV + absolute TDCO
+ * = TDCV + SP + relative TDCO
+ *
+ * -+----------- one bit ----------+-- TX pin
+ * |<--- Sample Point --->|
+ *
+ * --+----------- one bit ----------+-- RX pin
+ * |<-------- TDCV -------->|
+ * |<------------------------>| absolute TDCO
+ * |<--- Sample Point --->|
+ * | |<->| relative TDCO
+ * |<------------- Secondary Sample Point ------------>|
+ */
+static inline s32 can_get_relative_tdco(const struct can_priv *priv)
+{
+ const struct can_bittiming *dbt = &priv->data_bittiming;
+ s32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg +
+ dbt->phase_seg1) * dbt->brp;
+
+ return (s32)priv->tdc.tdco - sample_point_in_tc;
+}
/* helper to define static CAN controller features at device creation time */
static inline void can_set_static_ctrlmode(struct net_device *dev,
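
For a controller whose hardware register takes the sample-point-relative
offset, the conversion above might be used as follows; the driver-side names
are hypothetical:

	static void my_hw_set_ssp(s32 rel_tdco)
	{
		/* hypothetical register write goes here */
	}

	static void my_can_apply_tdc(struct can_priv *priv)
	{
		if (!can_tdc_is_enabled(priv))
			return;

		/* This hardware expects TDCO relative to the sample point */
		my_hw_set_ssp(can_get_relative_tdco(priv));
	}
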
diff --git a/include/linux/cc_platform.h b/include/linux/cc_platform.h
new file mode 100644
index 000000000000..a075b70b9a70
--- /dev/null
+++ b/include/linux/cc_platform.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Confidential Computing Platform Capability checks
+ *
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ */
+
+#ifndef _LINUX_CC_PLATFORM_H
+#define _LINUX_CC_PLATFORM_H
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+
+/**
+ * enum cc_attr - Confidential computing attributes
+ *
+ * These attributes represent confidential computing features that are
+ * currently active.
+ */
+enum cc_attr {
+ /**
+ * @CC_ATTR_MEM_ENCRYPT: Memory encryption is active
+ *
+ * The platform/OS is running with active memory encryption. This
+ * includes running either as a bare-metal system or a hypervisor
+ * that is actively using memory encryption, or as a guest/virtual
+ * machine that is actively using memory encryption.
+ *
+ * Examples include SME, SEV and SEV-ES.
+ */
+ CC_ATTR_MEM_ENCRYPT,
+
+ /**
+ * @CC_ATTR_HOST_MEM_ENCRYPT: Host memory encryption is active
+ *
+ * The platform/OS is running as a bare-metal system or a hypervisor
+ * and actively using memory encryption.
+ *
+ * Examples include SME.
+ */
+ CC_ATTR_HOST_MEM_ENCRYPT,
+
+ /**
+ * @CC_ATTR_GUEST_MEM_ENCRYPT: Guest memory encryption is active
+ *
+ * The platform/OS is running as a guest/virtual machine and actively
+ * using memory encryption.
+ *
+ * Examples include SEV and SEV-ES.
+ */
+ CC_ATTR_GUEST_MEM_ENCRYPT,
+
+ /**
+ * @CC_ATTR_GUEST_STATE_ENCRYPT: Guest state encryption is active
+ *
+ * The platform/OS is running as a guest/virtual machine and actively
+ * using memory encryption and register state encryption.
+ *
+ * Examples include SEV-ES.
+ */
+ CC_ATTR_GUEST_STATE_ENCRYPT,
+};
+
+#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
+
+/**
+ * cc_platform_has() - Checks if the specified cc_attr attribute is active
+ * @attr: Confidential computing attribute to check
+ *
+ * The cc_platform_has() function returns whether the specified
+ * Confidential Computing attribute is currently active.
+ *
+ * Context: Any context
+ * Return:
+ * * TRUE - Specified Confidential Computing attribute is active
+ * * FALSE - Specified Confidential Computing attribute is not active
+ */
+bool cc_platform_has(enum cc_attr attr);
+
+#else /* !CONFIG_ARCH_HAS_CC_PLATFORM */
+
+static inline bool cc_platform_has(enum cc_attr attr) { return false; }
+
+#endif /* CONFIG_ARCH_HAS_CC_PLATFORM */
+
+#endif /* _LINUX_CC_PLATFORM_H */
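
A minimal usage sketch for the new interface (the calling function is
hypothetical): code that must behave differently under guest memory
encryption branches on the generic attribute rather than a vendor-specific
check.

	#include <linux/cc_platform.h>
	#include <linux/printk.h>

	static void my_guest_setup(void)
	{
		if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
			pr_info("memory-encrypted guest (e.g. SEV)\n");
	}
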
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index c4fef00abdf3..0a89f111e00e 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -64,6 +64,7 @@ struct cdrom_device_info {
int for_data;
int (*exit)(struct cdrom_device_info *);
int mrw_mode_page;
+ __s64 last_media_change_ms;
};
struct cdrom_device_ops {
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index bc2699feddbe..7ad6c3d0db7d 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -302,6 +302,8 @@ enum {
CEPH_SESSION_REQUEST_FLUSH_MDLOG,
};
+#define CEPH_SESSION_BLOCKLISTED (1 << 0) /* session blocklisted */
+
extern const char *ceph_session_op_name(int op);
struct ceph_mds_session_head {
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 83fa08a06507..3431011f364d 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -475,6 +475,14 @@ extern void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
u64 expected_object_size,
u64 expected_write_size,
u32 flags);
+extern int osd_req_op_copy_from_init(struct ceph_osd_request *req,
+ u64 src_snapid, u64 src_version,
+ struct ceph_object_id *src_oid,
+ struct ceph_object_locator *src_oloc,
+ u32 src_fadvise_flags,
+ u32 dst_fadvise_flags,
+ u32 truncate_seq, u64 truncate_size,
+ u8 copy_from_flags);
extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
struct ceph_snap_context *snapc,
@@ -515,17 +523,6 @@ int ceph_osdc_call(struct ceph_osd_client *osdc,
struct page *req_page, size_t req_len,
struct page **resp_pages, size_t *resp_len);
-int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
- u64 src_snapid, u64 src_version,
- struct ceph_object_id *src_oid,
- struct ceph_object_locator *src_oloc,
- u32 src_fadvise_flags,
- struct ceph_object_id *dst_oid,
- struct ceph_object_locator *dst_oloc,
- u32 dst_fadvise_flags,
- u32 truncate_seq, u64 truncate_size,
- u8 copy_from_flags);
-
/* watch/notify */
struct ceph_osd_linger_request *
ceph_osdc_watch(struct ceph_osd_client *osdc,
diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h
index d128ad1570aa..3650e926e93f 100644
--- a/include/linux/clk/tegra.h
+++ b/include/linux/clk/tegra.h
@@ -42,6 +42,7 @@ struct tegra_cpu_car_ops {
#endif
};
+#ifdef CONFIG_ARCH_TEGRA
extern struct tegra_cpu_car_ops *tegra_cpu_car_ops;
static inline void tegra_wait_cpu_in_reset(u32 cpu)
@@ -83,8 +84,29 @@ static inline void tegra_disable_cpu_clock(u32 cpu)
tegra_cpu_car_ops->disable_clock(cpu);
}
+#else
+static inline void tegra_wait_cpu_in_reset(u32 cpu)
+{
+}
-#ifdef CONFIG_PM_SLEEP
+static inline void tegra_put_cpu_in_reset(u32 cpu)
+{
+}
+
+static inline void tegra_cpu_out_of_reset(u32 cpu)
+{
+}
+
+static inline void tegra_enable_cpu_clock(u32 cpu)
+{
+}
+
+static inline void tegra_disable_cpu_clock(u32 cpu)
+{
+}
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA) && defined(CONFIG_PM_SLEEP)
static inline bool tegra_cpu_rail_off_ready(void)
{
if (WARN_ON(!tegra_cpu_car_ops->rail_off_ready))
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 53fd8c3cdbd0..bd801023504b 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -46,6 +46,7 @@ extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
struct cma **res_cma);
extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
bool no_warn);
+extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index bd2b881c6b63..ccbbd31b3aae 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -41,8 +41,6 @@
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
-#define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
-
#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
#define __latent_entropy __attribute__((latent_entropy))
#endif
@@ -124,6 +122,14 @@
#endif
/*
+ * Treat __SANITIZE_HWADDRESS__ the same as __SANITIZE_ADDRESS__ in the kernel,
+ * matching the defines used by Clang.
+ */
+#ifdef __SANITIZE_HWADDRESS__
+#define __SANITIZE_ADDRESS__
+#endif
+
+/*
* Turn individual warnings and errors on and off locally, depending
* on version.
*/
@@ -144,3 +150,11 @@
#else
#define __diag_GCC_8(s)
#endif
+
+/*
+ * Prior to GCC 9.1, -Wno-alloc-size-larger-than (and therefore the
+ * "alloc_size" attribute) does not work, and must be disabled.
+ */
+#if GCC_VERSION < 90100
+#undef __alloc_size__
+#endif
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index e6ec63403965..b9121afd8733 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -34,6 +34,15 @@
#define __aligned_largest __attribute__((__aligned__))
/*
+ * Note: do not use this directly. Instead, use __alloc_size() since it is conditionally
+ * available and includes other attributes.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alloc_005fsize-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#alloc-size
+ */
+#define __alloc_size__(x, ...) __attribute__((__alloc_size__(x, ## __VA_ARGS__)))
+
+/*
* Note: users of __always_inline currently do not write "inline" themselves,
* which seems to be required by gcc to apply the attribute according
* to its docs (and also "warning: always_inline function might not be
@@ -104,7 +113,6 @@
#define __deprecated
/*
- * Optional: only supported since gcc >= 5.1
* Optional: not supported by clang
* Optional: not supported by icc
*
@@ -153,6 +161,7 @@
/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-malloc-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#malloc
*/
#define __malloc __attribute__((__malloc__))
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index b6ff83a714ca..1d32f4c03c9e 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -250,6 +250,18 @@ struct ftrace_likely_data {
# define __cficanonical
#endif
+/*
+ * Any place that could be marked with the "alloc_size" attribute is also
+ * a place to be marked with the "malloc" attribute. Do this as part of the
+ * __alloc_size macro to avoid redundant attributes and to avoid missing a
+ * __malloc marking.
+ */
+#ifdef __alloc_size__
+# define __alloc_size(x, ...) __alloc_size__(x, ## __VA_ARGS__) __malloc
+#else
+# define __alloc_size(x, ...) __malloc
+#endif
+
#ifndef asm_volatile_goto
#define asm_volatile_goto(x...) asm goto(x)
#endif
@@ -290,15 +302,16 @@ struct ftrace_likely_data {
(sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
-/* Compile time object size, -1 for unknown */
-#ifndef __compiletime_object_size
-# define __compiletime_object_size(obj) -1
-#endif
-
#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix) \
do { \
- extern void prefix ## suffix(void) __compiletime_error(msg); \
+ /* \
+ * __noreturn is needed to give the compiler enough \
+ * information to avoid certain possibly-uninitialized \
+ * warnings (regardless of the build failing). \
+ */ \
+ __noreturn extern void prefix ## suffix(void) \
+ __compiletime_error(msg); \
if (!(condition)) \
prefix ## suffix(); \
} while (0)
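
How the resulting __alloc_size() marking is meant to be applied, per the
comments above; the allocator prototypes below are hypothetical:

	/* The returned buffer is 'size' bytes (argument 1) */
	void *my_malloc(size_t size) __alloc_size(1);

	/* Array form: the buffer is n * size bytes (arguments 1 and 2) */
	void *my_calloc(size_t n, size_t size) __alloc_size(1, 2);
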
diff --git a/include/linux/console.h b/include/linux/console.h
index 20874db50bc8..a97f277cfdfa 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -149,6 +149,8 @@ struct console {
short flags;
short index;
int cflag;
+ uint ispeed;
+ uint ospeed;
void *data;
struct console *next;
};
diff --git a/include/linux/container_of.h b/include/linux/container_of.h
new file mode 100644
index 000000000000..2f4944b791b8
--- /dev/null
+++ b/include/linux/container_of.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CONTAINER_OF_H
+#define _LINUX_CONTAINER_OF_H
+
+#include <linux/build_bug.h>
+#include <linux/err.h>
+
+#define typeof_member(T, m) typeof(((T*)0)->m)
+
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ */
+#define container_of(ptr, type, member) ({ \
+ void *__mptr = (void *)(ptr); \
+ static_assert(__same_type(*(ptr), ((type *)0)->member) || \
+ __same_type(*(ptr), void), \
+ "pointer type mismatch in container_of()"); \
+ ((type *)(__mptr - offsetof(type, member))); })
+
+/**
+ * container_of_safe - cast a member of a structure out to the containing structure
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ * If IS_ERR_OR_NULL(ptr), ptr is returned unchanged.
+ */
+#define container_of_safe(ptr, type, member) ({ \
+ void *__mptr = (void *)(ptr); \
+ static_assert(__same_type(*(ptr), ((type *)0)->member) || \
+ __same_type(*(ptr), void), \
+ "pointer type mismatch in container_of_safe()"); \
+ IS_ERR_OR_NULL(__mptr) ? ERR_CAST(__mptr) : \
+ ((type *)(__mptr - offsetof(type, member))); })
+
+#endif /* _LINUX_CONTAINER_OF_H */
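
A short usage sketch for container_of() (the structure is hypothetical):

	struct my_device {
		int id;
		struct list_head node;
	};

	static struct my_device *node_to_my_device(struct list_head *n)
	{
		/* Recover the enclosing structure from its embedded member */
		return container_of(n, struct my_device, node);
	}
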
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 4d7fced3a39f..7a14807c9d1a 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -105,7 +105,7 @@ static inline void user_exit_irqoff(void) { }
static inline enum ctx_state exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
-static inline bool context_tracking_guest_enter(void) { return false; }
+static __always_inline bool context_tracking_guest_enter(void) { return false; }
static inline void context_tracking_guest_exit(void) { }
#endif /* !CONFIG_CONTEXT_TRACKING */
diff --git a/include/linux/counter.h b/include/linux/counter.h
index d16ce2819b48..b7d0a00a61cf 100644
--- a/include/linux/counter.h
+++ b/include/linux/counter.h
@@ -6,417 +6,280 @@
#ifndef _COUNTER_H_
#define _COUNTER_H_
-#include <linux/counter_enum.h>
+#include <linux/cdev.h>
#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/kfifo.h>
+#include <linux/mutex.h>
+#include <linux/spinlock_types.h>
#include <linux/types.h>
-
-enum counter_count_direction {
- COUNTER_COUNT_DIRECTION_FORWARD = 0,
- COUNTER_COUNT_DIRECTION_BACKWARD
-};
-extern const char *const counter_count_direction_str[2];
-
-enum counter_count_mode {
- COUNTER_COUNT_MODE_NORMAL = 0,
- COUNTER_COUNT_MODE_RANGE_LIMIT,
- COUNTER_COUNT_MODE_NON_RECYCLE,
- COUNTER_COUNT_MODE_MODULO_N
-};
-extern const char *const counter_count_mode_str[4];
+#include <linux/wait.h>
+#include <uapi/linux/counter.h>
struct counter_device;
+struct counter_count;
+struct counter_synapse;
struct counter_signal;
+enum counter_comp_type {
+ COUNTER_COMP_U8,
+ COUNTER_COMP_U64,
+ COUNTER_COMP_BOOL,
+ COUNTER_COMP_SIGNAL_LEVEL,
+ COUNTER_COMP_FUNCTION,
+ COUNTER_COMP_SYNAPSE_ACTION,
+ COUNTER_COMP_ENUM,
+ COUNTER_COMP_COUNT_DIRECTION,
+ COUNTER_COMP_COUNT_MODE,
+};
+
/**
- * struct counter_signal_ext - Counter Signal extensions
- * @name: attribute name
- * @read: read callback for this attribute; may be NULL
- * @write: write callback for this attribute; may be NULL
- * @priv: data private to the driver
+ * struct counter_comp - Counter component node
+ * @type: Counter component data type
+ * @name: device-specific component name
+ * @priv: component-relevant data
+ * @action_read: Synapse action mode read callback. The read value of the
+ * respective Synapse action mode should be passed back via
+ * the action parameter.
+ * @device_u8_read: Device u8 component read callback. The read value of the
+ * respective Device u8 component should be passed back via
+ * the val parameter.
+ * @count_u8_read: Count u8 component read callback. The read value of the
+ * respective Count u8 component should be passed back via
+ * the val parameter.
+ * @signal_u8_read: Signal u8 component read callback. The read value of the
+ * respective Signal u8 component should be passed back via
+ * the val parameter.
+ * @device_u32_read: Device u32 component read callback. The read value of
+ * the respective Device u32 component should be passed
+ * back via the val parameter.
+ * @count_u32_read: Count u32 component read callback. The read value of the
+ * respective Count u32 component should be passed back via
+ * the val parameter.
+ * @signal_u32_read: Signal u32 component read callback. The read value of
+ * the respective Signal u32 component should be passed
+ * back via the val parameter.
+ * @device_u64_read: Device u64 component read callback. The read value of
+ * the respective Device u64 component should be passed
+ * back via the val parameter.
+ * @count_u64_read: Count u64 component read callback. The read value of the
+ * respective Count u64 component should be passed back via
+ * the val parameter.
+ * @signal_u64_read: Signal u64 component read callback. The read value of
+ * the respective Signal u64 component should be passed
+ * back via the val parameter.
+ * @action_write: Synapse action mode write callback. The write value of
+ * the respective Synapse action mode is passed via the
+ * action parameter.
+ * @device_u8_write: Device u8 component write callback. The write value of
+ * the respective Device u8 component is passed via the val
+ * parameter.
+ * @count_u8_write: Count u8 component write callback. The write value of
+ * the respective Count u8 component is passed via the val
+ * parameter.
+ * @signal_u8_write: Signal u8 component write callback. The write value of
+ * the respective Signal u8 component is passed via the val
+ * parameter.
+ * @device_u32_write: Device u32 component write callback. The write value of
+ * the respective Device u32 component is passed via the
+ * val parameter.
+ * @count_u32_write: Count u32 component write callback. The write value of
+ * the respective Count u32 component is passed via the val
+ * parameter.
+ * @signal_u32_write: Signal u32 component write callback. The write value of
+ * the respective Signal u32 component is passed via the
+ * val parameter.
+ * @device_u64_write: Device u64 component write callback. The write value of
+ * the respective Device u64 component is passed via the
+ * val parameter.
+ * @count_u64_write: Count u64 component write callback. The write value of
+ * the respective Count u64 component is passed via the val
+ * parameter.
+ * @signal_u64_write: Signal u64 component write callback. The write value of
+ * the respective Signal u64 component is passed via the
+ * val parameter.
*/
-struct counter_signal_ext {
+struct counter_comp {
+ enum counter_comp_type type;
const char *name;
- ssize_t (*read)(struct counter_device *counter,
- struct counter_signal *signal, void *priv, char *buf);
- ssize_t (*write)(struct counter_device *counter,
- struct counter_signal *signal, void *priv,
- const char *buf, size_t len);
void *priv;
+ union {
+ int (*action_read)(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action);
+ int (*device_u8_read)(struct counter_device *counter, u8 *val);
+ int (*count_u8_read)(struct counter_device *counter,
+ struct counter_count *count, u8 *val);
+ int (*signal_u8_read)(struct counter_device *counter,
+ struct counter_signal *signal, u8 *val);
+ int (*device_u32_read)(struct counter_device *counter,
+ u32 *val);
+ int (*count_u32_read)(struct counter_device *counter,
+ struct counter_count *count, u32 *val);
+ int (*signal_u32_read)(struct counter_device *counter,
+ struct counter_signal *signal, u32 *val);
+ int (*device_u64_read)(struct counter_device *counter,
+ u64 *val);
+ int (*count_u64_read)(struct counter_device *counter,
+ struct counter_count *count, u64 *val);
+ int (*signal_u64_read)(struct counter_device *counter,
+ struct counter_signal *signal, u64 *val);
+ };
+ union {
+ int (*action_write)(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action action);
+ int (*device_u8_write)(struct counter_device *counter, u8 val);
+ int (*count_u8_write)(struct counter_device *counter,
+ struct counter_count *count, u8 val);
+ int (*signal_u8_write)(struct counter_device *counter,
+ struct counter_signal *signal, u8 val);
+ int (*device_u32_write)(struct counter_device *counter,
+ u32 val);
+ int (*count_u32_write)(struct counter_device *counter,
+ struct counter_count *count, u32 val);
+ int (*signal_u32_write)(struct counter_device *counter,
+ struct counter_signal *signal, u32 val);
+ int (*device_u64_write)(struct counter_device *counter,
+ u64 val);
+ int (*count_u64_write)(struct counter_device *counter,
+ struct counter_count *count, u64 val);
+ int (*signal_u64_write)(struct counter_device *counter,
+ struct counter_signal *signal, u64 val);
+ };
};
/**
* struct counter_signal - Counter Signal node
- * @id: unique ID used to identify signal
- * @name: device-specific Signal name; ideally, this should match the name
- * as it appears in the datasheet documentation
- * @ext: optional array of Counter Signal extensions
- * @num_ext: number of Counter Signal extensions specified in @ext
- * @priv: optional private data supplied by driver
+ * @id: unique ID used to identify the Signal
+ * @name: device-specific Signal name
+ * @ext: optional array of Signal extensions
+ * @num_ext: number of Signal extensions specified in @ext
*/
struct counter_signal {
int id;
const char *name;
- const struct counter_signal_ext *ext;
+ struct counter_comp *ext;
size_t num_ext;
-
- void *priv;
-};
-
-/**
- * struct counter_signal_enum_ext - Signal enum extension attribute
- * @items: Array of strings
- * @num_items: Number of items specified in @items
- * @set: Set callback function; may be NULL
- * @get: Get callback function; may be NULL
- *
- * The counter_signal_enum_ext structure can be used to implement enum style
- * Signal extension attributes. Enum style attributes are those which have a set
- * of strings that map to unsigned integer values. The Generic Counter Signal
- * enum extension helper code takes care of mapping between value and string, as
- * well as generating a "_available" file which contains a list of all available
- * items. The get callback is used to query the currently active item; the index
- * of the item within the respective items array is returned via the 'item'
- * parameter. The set callback is called when the attribute is updated; the
- * 'item' parameter contains the index of the newly activated item within the
- * respective items array.
- */
-struct counter_signal_enum_ext {
- const char * const *items;
- size_t num_items;
- int (*get)(struct counter_device *counter,
- struct counter_signal *signal, size_t *item);
- int (*set)(struct counter_device *counter,
- struct counter_signal *signal, size_t item);
-};
-
-/**
- * COUNTER_SIGNAL_ENUM() - Initialize Signal enum extension
- * @_name: Attribute name
- * @_e: Pointer to a counter_signal_enum_ext structure
- *
- * This should usually be used together with COUNTER_SIGNAL_ENUM_AVAILABLE()
- */
-#define COUNTER_SIGNAL_ENUM(_name, _e) \
-{ \
- .name = (_name), \
- .read = counter_signal_enum_read, \
- .write = counter_signal_enum_write, \
- .priv = (_e) \
-}
-
-/**
- * COUNTER_SIGNAL_ENUM_AVAILABLE() - Initialize Signal enum available extension
- * @_name: Attribute name ("_available" will be appended to the name)
- * @_e: Pointer to a counter_signal_enum_ext structure
- *
- * Creates a read only attribute that lists all the available enum items in a
- * newline separated list. This should usually be used together with
- * COUNTER_SIGNAL_ENUM()
- */
-#define COUNTER_SIGNAL_ENUM_AVAILABLE(_name, _e) \
-{ \
- .name = (_name "_available"), \
- .read = counter_signal_enum_available_read, \
- .priv = (_e) \
-}
-
-enum counter_synapse_action {
- COUNTER_SYNAPSE_ACTION_NONE = 0,
- COUNTER_SYNAPSE_ACTION_RISING_EDGE,
- COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
- COUNTER_SYNAPSE_ACTION_BOTH_EDGES
};
/**
* struct counter_synapse - Counter Synapse node
- * @action: index of current action mode
* @actions_list: array of available action modes
* @num_actions: number of action modes specified in @actions_list
- * @signal: pointer to associated signal
+ * @signal: pointer to the associated Signal
*/
struct counter_synapse {
- size_t action;
const enum counter_synapse_action *actions_list;
size_t num_actions;
struct counter_signal *signal;
};
-struct counter_count;
-
-/**
- * struct counter_count_ext - Counter Count extension
- * @name: attribute name
- * @read: read callback for this attribute; may be NULL
- * @write: write callback for this attribute; may be NULL
- * @priv: data private to the driver
- */
-struct counter_count_ext {
- const char *name;
- ssize_t (*read)(struct counter_device *counter,
- struct counter_count *count, void *priv, char *buf);
- ssize_t (*write)(struct counter_device *counter,
- struct counter_count *count, void *priv,
- const char *buf, size_t len);
- void *priv;
-};
-
-enum counter_function {
- COUNTER_FUNCTION_INCREASE = 0,
- COUNTER_FUNCTION_DECREASE,
- COUNTER_FUNCTION_PULSE_DIRECTION,
- COUNTER_FUNCTION_QUADRATURE_X1_A,
- COUNTER_FUNCTION_QUADRATURE_X1_B,
- COUNTER_FUNCTION_QUADRATURE_X2_A,
- COUNTER_FUNCTION_QUADRATURE_X2_B,
- COUNTER_FUNCTION_QUADRATURE_X4
-};
-
/**
* struct counter_count - Counter Count node
- * @id: unique ID used to identify Count
- * @name: device-specific Count name; ideally, this should match
- * the name as it appears in the datasheet documentation
- * @function: index of current function mode
- * @functions_list: array available function modes
+ * @id: unique ID used to identify the Count
+ * @name: device-specific Count name
+ * @functions_list: array of available function modes
* @num_functions: number of function modes specified in @functions_list
- * @synapses: array of synapses for initialization
- * @num_synapses: number of synapses specified in @synapses
- * @ext: optional array of Counter Count extensions
- * @num_ext: number of Counter Count extensions specified in @ext
- * @priv: optional private data supplied by driver
+ * @synapses: array of Synapses for initialization
+ * @num_synapses: number of Synapses specified in @synapses
+ * @ext: optional array of Count extensions
+ * @num_ext: number of Count extensions specified in @ext
*/
struct counter_count {
int id;
const char *name;
- size_t function;
const enum counter_function *functions_list;
size_t num_functions;
struct counter_synapse *synapses;
size_t num_synapses;
- const struct counter_count_ext *ext;
+ struct counter_comp *ext;
size_t num_ext;
-
- void *priv;
};
/**
- * struct counter_count_enum_ext - Count enum extension attribute
- * @items: Array of strings
- * @num_items: Number of items specified in @items
- * @set: Set callback function; may be NULL
- * @get: Get callback function; may be NULL
- *
- * The counter_count_enum_ext structure can be used to implement enum style
- * Count extension attributes. Enum style attributes are those which have a set
- * of strings that map to unsigned integer values. The Generic Counter Count
- * enum extension helper code takes care of mapping between value and string, as
- * well as generating a "_available" file which contains a list of all available
- * items. The get callback is used to query the currently active item; the index
- * of the item within the respective items array is returned via the 'item'
- * parameter. The set callback is called when the attribute is updated; the
- * 'item' parameter contains the index of the newly activated item within the
- * respective items array.
+ * struct counter_event_node - Counter Event node
+ * @l: list of current watching Counter events
+ * @event: event that triggers
+ * @channel: event channel
+ * @comp_list: list of components to watch when event triggers
*/
-struct counter_count_enum_ext {
- const char * const *items;
- size_t num_items;
- int (*get)(struct counter_device *counter, struct counter_count *count,
- size_t *item);
- int (*set)(struct counter_device *counter, struct counter_count *count,
- size_t item);
-};
-
-/**
- * COUNTER_COUNT_ENUM() - Initialize Count enum extension
- * @_name: Attribute name
- * @_e: Pointer to a counter_count_enum_ext structure
- *
- * This should usually be used together with COUNTER_COUNT_ENUM_AVAILABLE()
- */
-#define COUNTER_COUNT_ENUM(_name, _e) \
-{ \
- .name = (_name), \
- .read = counter_count_enum_read, \
- .write = counter_count_enum_write, \
- .priv = (_e) \
-}
-
-/**
- * COUNTER_COUNT_ENUM_AVAILABLE() - Initialize Count enum available extension
- * @_name: Attribute name ("_available" will be appended to the name)
- * @_e: Pointer to a counter_count_enum_ext structure
- *
- * Creates a read only attribute that lists all the available enum items in a
- * newline separated list. This should usually be used together with
- * COUNTER_COUNT_ENUM()
- */
-#define COUNTER_COUNT_ENUM_AVAILABLE(_name, _e) \
-{ \
- .name = (_name "_available"), \
- .read = counter_count_enum_available_read, \
- .priv = (_e) \
-}
-
-/**
- * struct counter_device_attr_group - internal container for attribute group
- * @attr_group: Counter sysfs attributes group
- * @attr_list: list to keep track of created Counter sysfs attributes
- * @num_attr: number of Counter sysfs attributes
- */
-struct counter_device_attr_group {
- struct attribute_group attr_group;
- struct list_head attr_list;
- size_t num_attr;
-};
-
-/**
- * struct counter_device_state - internal state container for a Counter device
- * @id: unique ID used to identify the Counter
- * @dev: internal device structure
- * @groups_list: attribute groups list (for Signals, Counts, and ext)
- * @num_groups: number of attribute groups containers
- * @groups: Counter sysfs attribute groups (to populate @dev.groups)
- */
-struct counter_device_state {
- int id;
- struct device dev;
- struct counter_device_attr_group *groups_list;
- size_t num_groups;
- const struct attribute_group **groups;
-};
-
-enum counter_signal_level {
- COUNTER_SIGNAL_LEVEL_LOW,
- COUNTER_SIGNAL_LEVEL_HIGH,
+struct counter_event_node {
+ struct list_head l;
+ u8 event;
+ u8 channel;
+ struct list_head comp_list;
};
/**
* struct counter_ops - Callbacks from driver
- * @signal_read: optional read callback for Signal attribute. The read
- * level of the respective Signal should be passed back via
- * the level parameter.
- * @count_read: optional read callback for Count attribute. The read
- * value of the respective Count should be passed back via
- * the val parameter.
- * @count_write: optional write callback for Count attribute. The write
- * value for the respective Count is passed in via the val
+ * @signal_read: optional read callback for Signals. The read level of
+ * the respective Signal should be passed back via the
+ * level parameter.
+ * @count_read: read callback for Counts. The read value of the
+ * respective Count should be passed back via the value
+ * parameter.
+ * @count_write: optional write callback for Counts. The write value for
+ * the respective Count is passed in via the value
* parameter.
- * @function_get: function to get the current count function mode. Returns
- * 0 on success and negative error code on error. The index
- * of the respective Count's returned function mode should
- * be passed back via the function parameter.
- * @function_set: function to set the count function mode. function is the
- * index of the requested function mode from the respective
- * Count's functions_list array.
- * @action_get: function to get the current action mode. Returns 0 on
- * success and negative error code on error. The index of
- * the respective Synapse's returned action mode should be
+ * @function_read: read callback for the Count function modes. The read
+ * function mode of the respective Count should be passed
+ * back via the function parameter.
+ * @function_write: optional write callback for Count function modes. The
+ * function mode to write for the respective Count is
+ * passed in via the function parameter.
+ * @action_read: optional read callback for the Synapse action modes. The
+ * read action mode of the respective Synapse should be
* passed back via the action parameter.
- * @action_set: function to set the action mode. action is the index of
- * the requested action mode from the respective Synapse's
- * actions_list array.
+ * @action_write: optional write callback for Synapse action modes. The
+ * action mode to write for the respective Synapse is
+ * passed in via the action parameter.
+ * @events_configure: optional write callback to configure events. The list of
+ * struct counter_event_node may be accessed via the
+ * events_list member of the counter parameter.
+ * @watch_validate: optional callback to validate a watch. The Counter
+ * component watch configuration is passed in via the watch
+ * parameter. A return value of 0 indicates a valid Counter
+ * component watch configuration.
*/
struct counter_ops {
int (*signal_read)(struct counter_device *counter,
struct counter_signal *signal,
enum counter_signal_level *level);
int (*count_read)(struct counter_device *counter,
- struct counter_count *count, unsigned long *val);
+ struct counter_count *count, u64 *value);
int (*count_write)(struct counter_device *counter,
- struct counter_count *count, unsigned long val);
- int (*function_get)(struct counter_device *counter,
- struct counter_count *count, size_t *function);
- int (*function_set)(struct counter_device *counter,
- struct counter_count *count, size_t function);
- int (*action_get)(struct counter_device *counter,
- struct counter_count *count,
- struct counter_synapse *synapse, size_t *action);
- int (*action_set)(struct counter_device *counter,
- struct counter_count *count,
- struct counter_synapse *synapse, size_t action);
-};
-
-/**
- * struct counter_device_ext - Counter device extension
- * @name: attribute name
- * @read: read callback for this attribute; may be NULL
- * @write: write callback for this attribute; may be NULL
- * @priv: data private to the driver
- */
-struct counter_device_ext {
- const char *name;
- ssize_t (*read)(struct counter_device *counter, void *priv, char *buf);
- ssize_t (*write)(struct counter_device *counter, void *priv,
- const char *buf, size_t len);
- void *priv;
-};
-
-/**
- * struct counter_device_enum_ext - Counter enum extension attribute
- * @items: Array of strings
- * @num_items: Number of items specified in @items
- * @set: Set callback function; may be NULL
- * @get: Get callback function; may be NULL
- *
- * The counter_device_enum_ext structure can be used to implement enum style
- * Counter extension attributes. Enum style attributes are those which have a
- * set of strings that map to unsigned integer values. The Generic Counter enum
- * extension helper code takes care of mapping between value and string, as well
- * as generating a "_available" file which contains a list of all available
- * items. The get callback is used to query the currently active item; the index
- * of the item within the respective items array is returned via the 'item'
- * parameter. The set callback is called when the attribute is updated; the
- * 'item' parameter contains the index of the newly activated item within the
- * respective items array.
- */
-struct counter_device_enum_ext {
- const char * const *items;
- size_t num_items;
- int (*get)(struct counter_device *counter, size_t *item);
- int (*set)(struct counter_device *counter, size_t item);
+ struct counter_count *count, u64 value);
+ int (*function_read)(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function *function);
+ int (*function_write)(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function function);
+ int (*action_read)(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action);
+ int (*action_write)(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action action);
+ int (*events_configure)(struct counter_device *counter);
+ int (*watch_validate)(struct counter_device *counter,
+ const struct counter_watch *watch);
};
/**
- * COUNTER_DEVICE_ENUM() - Initialize Counter enum extension
- * @_name: Attribute name
- * @_e: Pointer to a counter_device_enum_ext structure
- *
- * This should usually be used together with COUNTER_DEVICE_ENUM_AVAILABLE()
- */
-#define COUNTER_DEVICE_ENUM(_name, _e) \
-{ \
- .name = (_name), \
- .read = counter_device_enum_read, \
- .write = counter_device_enum_write, \
- .priv = (_e) \
-}
-
-/**
- * COUNTER_DEVICE_ENUM_AVAILABLE() - Initialize Counter enum available extension
- * @_name: Attribute name ("_available" will be appended to the name)
- * @_e: Pointer to a counter_device_enum_ext structure
- *
- * Creates a read only attribute that lists all the available enum items in a
- * newline separated list. This should usually be used together with
- * COUNTER_DEVICE_ENUM()
- */
-#define COUNTER_DEVICE_ENUM_AVAILABLE(_name, _e) \
-{ \
- .name = (_name "_available"), \
- .read = counter_device_enum_available_read, \
- .priv = (_e) \
-}
-
-/**
* struct counter_device - Counter data structure
- * @name: name of the device as it appears in the datasheet
+ * @name: name of the device
* @parent: optional parent device providing the counters
- * @device_state: internal device state container
* @ops: callbacks from driver
* @signals: array of Signals
* @num_signals: number of Signals specified in @signals
@@ -425,11 +288,21 @@ struct counter_device_enum_ext {
* @ext: optional array of Counter device extensions
* @num_ext: number of Counter device extensions specified in @ext
* @priv: optional private data supplied by driver
+ * @dev: internal device structure
+ * @chrdev: internal character device structure
+ * @events_list: list of current watching Counter events
+ * @events_list_lock: lock to protect Counter events list operations
+ * @next_events_list: list of next watching Counter events
+ * @n_events_list_lock: lock to protect Counter next events list operations
+ * @events: queue of detected Counter events
+ * @events_wait: wait queue to allow blocking reads of Counter events
+ * @events_in_lock: lock to protect Counter events queue in operations
+ * @events_out_lock: lock to protect Counter events queue out operations
+ * @ops_exist_lock: lock to prevent use during removal
*/
struct counter_device {
const char *name;
struct device *parent;
- struct counter_device_state *device_state;
const struct counter_ops *ops;
@@ -438,17 +311,171 @@ struct counter_device {
struct counter_count *counts;
size_t num_counts;
- const struct counter_device_ext *ext;
+ struct counter_comp *ext;
size_t num_ext;
void *priv;
+
+ struct device dev;
+ struct cdev chrdev;
+ struct list_head events_list;
+ spinlock_t events_list_lock;
+ struct list_head next_events_list;
+ struct mutex n_events_list_lock;
+ DECLARE_KFIFO_PTR(events, struct counter_event);
+ wait_queue_head_t events_wait;
+ spinlock_t events_in_lock;
+ struct mutex events_out_lock;
+ struct mutex ops_exist_lock;
};
int counter_register(struct counter_device *const counter);
void counter_unregister(struct counter_device *const counter);
int devm_counter_register(struct device *dev,
struct counter_device *const counter);
-void devm_counter_unregister(struct device *dev,
- struct counter_device *const counter);
+void counter_push_event(struct counter_device *const counter, const u8 event,
+ const u8 channel);
+
+#define COUNTER_COMP_DEVICE_U8(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U8, \
+ .name = (_name), \
+ .device_u8_read = (_read), \
+ .device_u8_write = (_write), \
+}
+#define COUNTER_COMP_COUNT_U8(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U8, \
+ .name = (_name), \
+ .count_u8_read = (_read), \
+ .count_u8_write = (_write), \
+}
+#define COUNTER_COMP_SIGNAL_U8(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U8, \
+ .name = (_name), \
+ .signal_u8_read = (_read), \
+ .signal_u8_write = (_write), \
+}
+
+#define COUNTER_COMP_DEVICE_U64(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U64, \
+ .name = (_name), \
+ .device_u64_read = (_read), \
+ .device_u64_write = (_write), \
+}
+#define COUNTER_COMP_COUNT_U64(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U64, \
+ .name = (_name), \
+ .count_u64_read = (_read), \
+ .count_u64_write = (_write), \
+}
+#define COUNTER_COMP_SIGNAL_U64(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U64, \
+ .name = (_name), \
+ .signal_u64_read = (_read), \
+ .signal_u64_write = (_write), \
+}
+
+#define COUNTER_COMP_DEVICE_BOOL(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_BOOL, \
+ .name = (_name), \
+ .device_u8_read = (_read), \
+ .device_u8_write = (_write), \
+}
+#define COUNTER_COMP_COUNT_BOOL(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_BOOL, \
+ .name = (_name), \
+ .count_u8_read = (_read), \
+ .count_u8_write = (_write), \
+}
+#define COUNTER_COMP_SIGNAL_BOOL(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_BOOL, \
+ .name = (_name), \
+ .signal_u8_read = (_read), \
+ .signal_u8_write = (_write), \
+}
+
+struct counter_available {
+ union {
+ const u32 *enums;
+ const char *const *strs;
+ };
+ size_t num_items;
+};
+
+#define DEFINE_COUNTER_AVAILABLE(_name, _enums) \
+ struct counter_available _name = { \
+ .enums = (_enums), \
+ .num_items = ARRAY_SIZE(_enums), \
+ }
+
+#define DEFINE_COUNTER_ENUM(_name, _strs) \
+ struct counter_available _name = { \
+ .strs = (_strs), \
+ .num_items = ARRAY_SIZE(_strs), \
+ }
+
+#define COUNTER_COMP_DEVICE_ENUM(_name, _get, _set, _available) \
+{ \
+ .type = COUNTER_COMP_ENUM, \
+ .name = (_name), \
+ .device_u32_read = (_get), \
+ .device_u32_write = (_set), \
+ .priv = &(_available), \
+}
+#define COUNTER_COMP_COUNT_ENUM(_name, _get, _set, _available) \
+{ \
+ .type = COUNTER_COMP_ENUM, \
+ .name = (_name), \
+ .count_u32_read = (_get), \
+ .count_u32_write = (_set), \
+ .priv = &(_available), \
+}
+#define COUNTER_COMP_SIGNAL_ENUM(_name, _get, _set, _available) \
+{ \
+ .type = COUNTER_COMP_ENUM, \
+ .name = (_name), \
+ .signal_u32_read = (_get), \
+ .signal_u32_write = (_set), \
+ .priv = &(_available), \
+}
+
+#define COUNTER_COMP_CEILING(_read, _write) \
+ COUNTER_COMP_COUNT_U64("ceiling", _read, _write)
+
+#define COUNTER_COMP_COUNT_MODE(_read, _write, _available) \
+{ \
+ .type = COUNTER_COMP_COUNT_MODE, \
+ .name = "count_mode", \
+ .count_u32_read = (_read), \
+ .count_u32_write = (_write), \
+ .priv = &(_available), \
+}
+
+#define COUNTER_COMP_DIRECTION(_read) \
+{ \
+ .type = COUNTER_COMP_COUNT_DIRECTION, \
+ .name = "direction", \
+ .count_u32_read = (_read), \
+}
+
+#define COUNTER_COMP_ENABLE(_read, _write) \
+ COUNTER_COMP_COUNT_BOOL("enable", _read, _write)
+
+#define COUNTER_COMP_FLOOR(_read, _write) \
+ COUNTER_COMP_COUNT_U64("floor", _read, _write)
+
+#define COUNTER_COMP_PRESET(_read, _write) \
+ COUNTER_COMP_COUNT_U64("preset", _read, _write)
+
+#define COUNTER_COMP_PRESET_ENABLE(_read, _write) \
+ COUNTER_COMP_COUNT_BOOL("preset_enable", _read, _write)
#endif /* _COUNTER_H_ */
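To make the component macros above concrete, here is a hedged sketch of a driver describing a read-only Count extension (the callback and array names are hypothetical; the callback signature follows the count_u64_read prototype used by these initializers):

	static int foo_count_ceiling_read(struct counter_device *counter,
					  struct counter_count *count,
					  u64 *ceiling)
	{
		*ceiling = 0xFFFFFF;	/* e.g. a 24-bit hardware counter */
		return 0;
	}

	static struct counter_comp foo_count_ext[] = {
		/* passing NULL for the write callback makes it read-only */
		COUNTER_COMP_CEILING(foo_count_ceiling_read, NULL),
	};

Such an array is then presumably referenced from the ext/num_ext pair of the corresponding Count, mirroring the ext/num_ext members visible on struct counter_device above.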
diff --git a/include/linux/counter_enum.h b/include/linux/counter_enum.h
deleted file mode 100644
index 9f917298a88f..000000000000
--- a/include/linux/counter_enum.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Counter interface enum functions
- * Copyright (C) 2018 William Breathitt Gray
- */
-#ifndef _COUNTER_ENUM_H_
-#define _COUNTER_ENUM_H_
-
-#include <linux/types.h>
-
-struct counter_device;
-struct counter_signal;
-struct counter_count;
-
-ssize_t counter_signal_enum_read(struct counter_device *counter,
- struct counter_signal *signal, void *priv,
- char *buf);
-ssize_t counter_signal_enum_write(struct counter_device *counter,
- struct counter_signal *signal, void *priv,
- const char *buf, size_t len);
-
-ssize_t counter_signal_enum_available_read(struct counter_device *counter,
- struct counter_signal *signal,
- void *priv, char *buf);
-
-ssize_t counter_count_enum_read(struct counter_device *counter,
- struct counter_count *count, void *priv,
- char *buf);
-ssize_t counter_count_enum_write(struct counter_device *counter,
- struct counter_count *count, void *priv,
- const char *buf, size_t len);
-
-ssize_t counter_count_enum_available_read(struct counter_device *counter,
- struct counter_count *count,
- void *priv, char *buf);
-
-ssize_t counter_device_enum_read(struct counter_device *counter, void *priv,
- char *buf);
-ssize_t counter_device_enum_write(struct counter_device *counter, void *priv,
- const char *buf, size_t len);
-
-ssize_t counter_device_enum_available_read(struct counter_device *counter,
- void *priv, char *buf);
-
-#endif /* _COUNTER_ENUM_H_ */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index ff88bb3e44fc..1ab29e61b078 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -119,6 +119,13 @@ struct cpufreq_policy {
bool strict_target;
/*
+ * Set if inefficient frequencies were found in the frequency table.
+ * This indicates if the relation flag CPUFREQ_RELATION_E can be
+ * honored.
+ */
+ bool efficiencies_available;
+
+ /*
* Preferred average time interval between consecutive invocations of
* the driver to set the frequency for this policy. To be set by the
* scaling driver (0, which is the default, means no preference).
@@ -273,6 +280,12 @@ static inline void cpufreq_stats_record_transition(struct cpufreq_policy *policy
#define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */
#define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */
#define CPUFREQ_RELATION_C 2 /* closest frequency to target */
+/* relation flags */
+#define CPUFREQ_RELATION_E BIT(2) /* Get an efficient frequency if possible */
+
+#define CPUFREQ_RELATION_LE (CPUFREQ_RELATION_L | CPUFREQ_RELATION_E)
+#define CPUFREQ_RELATION_HE (CPUFREQ_RELATION_H | CPUFREQ_RELATION_E)
+#define CPUFREQ_RELATION_CE (CPUFREQ_RELATION_C | CPUFREQ_RELATION_E)
struct freq_attr {
struct attribute attr;
@@ -385,7 +398,7 @@ struct cpufreq_driver {
/* flags */
/*
- * Set by drivers that need to update internale upper and lower boundaries along
+ * Set by drivers that need to update internal upper and lower boundaries along
* with the target frequency and so the core and governors should also invoke
* the driver if the target frequency does not change, but the policy min or max
* may have changed.
@@ -627,9 +640,11 @@ struct cpufreq_governor *cpufreq_fallback_governor(void);
static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
{
if (policy->max < policy->cur)
- __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
+ __cpufreq_driver_target(policy, policy->max,
+ CPUFREQ_RELATION_HE);
else if (policy->min > policy->cur)
- __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+ __cpufreq_driver_target(policy, policy->min,
+ CPUFREQ_RELATION_LE);
}
/* Governor attribute set */
@@ -660,10 +675,11 @@ struct governor_attr {
*********************************************************************/
/* Special Values of .frequency field */
-#define CPUFREQ_ENTRY_INVALID ~0u
-#define CPUFREQ_TABLE_END ~1u
+#define CPUFREQ_ENTRY_INVALID ~0u
+#define CPUFREQ_TABLE_END ~1u
/* Special Values of .flags field */
-#define CPUFREQ_BOOST_FREQ (1 << 0)
+#define CPUFREQ_BOOST_FREQ (1 << 0)
+#define CPUFREQ_INEFFICIENT_FREQ (1 << 1)
struct cpufreq_frequency_table {
unsigned int flags;
@@ -740,6 +756,22 @@ static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
continue; \
else
+/**
+ * cpufreq_for_each_efficient_entry_idx - iterate with index over a cpufreq
+ * frequency_table excluding CPUFREQ_ENTRY_INVALID and
+ * CPUFREQ_INEFFICIENT_FREQ frequencies.
+ * @pos: the &struct cpufreq_frequency_table to use as a loop cursor.
+ * @table: the &struct cpufreq_frequency_table to iterate over.
+ * @idx: the table entry currently being processed.
+ * @efficiencies: set to true to only iterate over efficient frequencies.
+ */
+#define cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) \
+ cpufreq_for_each_valid_entry_idx(pos, table, idx) \
+ if (efficiencies && (pos->flags & CPUFREQ_INEFFICIENT_FREQ)) \
+ continue; \
+ else
+
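As a usage sketch for the new iterator (the surrounding function is hypothetical), returning the first valid entry while honoring the efficiency filter when the policy provides one:

	static unsigned int foo_first_usable_freq(struct cpufreq_policy *policy)
	{
		struct cpufreq_frequency_table *pos;
		bool efficiencies = policy->efficiencies_available;
		int idx;

		cpufreq_for_each_efficient_entry_idx(pos, policy->freq_table,
						     idx, efficiencies)
			return pos->frequency;

		return 0;	/* no usable entry */
	}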
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table);
@@ -764,14 +796,15 @@ bool policy_has_boost_freq(struct cpufreq_policy *policy);
/* Find lowest freq at or above target in a table in ascending order */
static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
struct cpufreq_frequency_table *pos;
unsigned int freq;
int idx, best = -1;
- cpufreq_for_each_valid_entry_idx(pos, table, idx) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq >= target_freq)
@@ -785,14 +818,15 @@ static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
/* Find lowest freq at or above target in a table in descending order */
static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
struct cpufreq_frequency_table *pos;
unsigned int freq;
int idx, best = -1;
- cpufreq_for_each_valid_entry_idx(pos, table, idx) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq == target_freq)
@@ -815,26 +849,30 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
target_freq = clamp_val(target_freq, policy->min, policy->max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
- return cpufreq_table_find_index_al(policy, target_freq);
+ return cpufreq_table_find_index_al(policy, target_freq,
+ efficiencies);
else
- return cpufreq_table_find_index_dl(policy, target_freq);
+ return cpufreq_table_find_index_dl(policy, target_freq,
+ efficiencies);
}
/* Find highest freq at or below target in a table in ascending order */
static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
struct cpufreq_frequency_table *pos;
unsigned int freq;
int idx, best = -1;
- cpufreq_for_each_valid_entry_idx(pos, table, idx) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq == target_freq)
@@ -857,14 +895,15 @@ static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
/* Find highest freq at or below target in a table in descending order */
static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
struct cpufreq_frequency_table *pos;
unsigned int freq;
int idx, best = -1;
- cpufreq_for_each_valid_entry_idx(pos, table, idx) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq <= target_freq)
@@ -878,26 +917,30 @@ static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
target_freq = clamp_val(target_freq, policy->min, policy->max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
- return cpufreq_table_find_index_ah(policy, target_freq);
+ return cpufreq_table_find_index_ah(policy, target_freq,
+ efficiencies);
else
- return cpufreq_table_find_index_dh(policy, target_freq);
+ return cpufreq_table_find_index_dh(policy, target_freq,
+ efficiencies);
}
/* Find closest freq to target in a table in ascending order */
static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
struct cpufreq_frequency_table *pos;
unsigned int freq;
int idx, best = -1;
- cpufreq_for_each_valid_entry_idx(pos, table, idx) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq == target_freq)
@@ -924,14 +967,15 @@ static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
/* Find closest freq to target in a table in descending order */
static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
struct cpufreq_frequency_table *pos;
unsigned int freq;
int idx, best = -1;
- cpufreq_for_each_valid_entry_idx(pos, table, idx) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq == target_freq)
@@ -958,35 +1002,58 @@ static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
target_freq = clamp_val(target_freq, policy->min, policy->max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
- return cpufreq_table_find_index_ac(policy, target_freq);
+ return cpufreq_table_find_index_ac(policy, target_freq,
+ efficiencies);
else
- return cpufreq_table_find_index_dc(policy, target_freq);
+ return cpufreq_table_find_index_dc(policy, target_freq,
+ efficiencies);
}
static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
+ bool efficiencies = policy->efficiencies_available &&
+ (relation & CPUFREQ_RELATION_E);
+ int idx;
+
+ /* cpufreq_table_index_unsorted() has no use for this flag anyway */
+ relation &= ~CPUFREQ_RELATION_E;
+
if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED))
return cpufreq_table_index_unsorted(policy, target_freq,
relation);
-
+retry:
switch (relation) {
case CPUFREQ_RELATION_L:
- return cpufreq_table_find_index_l(policy, target_freq);
+ idx = cpufreq_table_find_index_l(policy, target_freq,
+ efficiencies);
+ break;
case CPUFREQ_RELATION_H:
- return cpufreq_table_find_index_h(policy, target_freq);
+ idx = cpufreq_table_find_index_h(policy, target_freq,
+ efficiencies);
+ break;
case CPUFREQ_RELATION_C:
- return cpufreq_table_find_index_c(policy, target_freq);
+ idx = cpufreq_table_find_index_c(policy, target_freq,
+ efficiencies);
+ break;
default:
WARN_ON_ONCE(1);
return 0;
}
+
+ if (idx < 0 && efficiencies) {
+ efficiencies = false;
+ goto retry;
+ }
+
+ return idx;
}
static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy *policy)
@@ -1003,6 +1070,37 @@ static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy
return count;
}
+/**
+ * cpufreq_table_set_inefficient() - Mark a frequency as inefficient
+ * @policy: the &struct cpufreq_policy containing the inefficient frequency
+ * @frequency: the inefficient frequency
+ *
+ * The &struct cpufreq_policy must use a sorted frequency table.
+ *
+ * Return: %0 on success or a negative errno code
+ */
+static inline int
+cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
+ unsigned int frequency)
+{
+ struct cpufreq_frequency_table *pos;
+
+ /* Not supported */
+ if (policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED)
+ return -EINVAL;
+
+ cpufreq_for_each_valid_entry(pos, policy->freq_table) {
+ if (pos->frequency == frequency) {
+ pos->flags |= CPUFREQ_INEFFICIENT_FREQ;
+ policy->efficiencies_available = true;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
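The intended caller is a scaling driver that knows a given OPP is dominated by a higher one, e.g. from firmware data; a sketch (the frequency and function name are illustrative only):

	static void foo_mark_inefficient_opps(struct cpufreq_policy *policy)
	{
		/* running at 1200000 kHz costs more energy than 1400000 kHz */
		if (cpufreq_table_set_inefficient(policy, 1200000))
			pr_debug("1200000 kHz missing or table unsorted\n");
	}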
+
static inline int parse_perf_domain(int cpu, const char *list_name,
const char *cell_name)
{
@@ -1041,7 +1139,7 @@ static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_
if (cpu == pcpu)
continue;
- ret = parse_perf_domain(pcpu, list_name, cell_name);
+ ret = parse_perf_domain(cpu, list_name, cell_name);
if (ret < 0)
continue;
@@ -1071,6 +1169,13 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
return false;
}
+static inline int
+cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
+ unsigned int frequency)
+{
+ return -EINVAL;
+}
+
static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
const char *cell_name, struct cpumask *cpumask)
{
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 832d8a74fa59..773c83730906 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -72,6 +72,8 @@ enum cpuhp_state {
CPUHP_SLUB_DEAD,
CPUHP_DEBUG_OBJ_DEAD,
CPUHP_MM_WRITEBACK_DEAD,
+ /* Must be after CPUHP_MM_VMSTAT_DEAD */
+ CPUHP_MM_DEMOTION_DEAD,
CPUHP_MM_VMSTAT_DEAD,
CPUHP_SOFTIRQ_DEAD,
CPUHP_NET_MVNETA_DEAD,
@@ -97,6 +99,7 @@ enum cpuhp_state {
CPUHP_LUSTRE_CFS_DEAD,
CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
CPUHP_PADATA_DEAD,
+ CPUHP_AP_DTPM_CPU_DEAD,
CPUHP_WORKQUEUE_PREP,
CPUHP_POWER_NUMA_PREPARE,
CPUHP_HRTIMERS_PREPARE,
@@ -240,9 +243,10 @@ enum cpuhp_state {
CPUHP_AP_BASE_CACHEINFO_ONLINE,
CPUHP_AP_ONLINE_DYN,
CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
+ /* Must be after CPUHP_AP_ONLINE_DYN for node_states[N_CPU] update */
+ CPUHP_AP_MM_DEMOTION_ONLINE,
CPUHP_AP_X86_HPET_ONLINE,
CPUHP_AP_X86_KVM_CLK_ONLINE,
- CPUHP_AP_DTPM_CPU_ONLINE,
CPUHP_AP_ACTIVE,
CPUHP_ONLINE,
};
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index d2b9c41c8edf..d58e0476ee8e 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -34,6 +34,8 @@
*/
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
+extern struct static_key_false cpusets_insane_config_key;
+
static inline bool cpusets_enabled(void)
{
return static_branch_unlikely(&cpusets_enabled_key);
@@ -51,6 +53,19 @@ static inline void cpuset_dec(void)
static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}
+/*
+ * This will get enabled whenever a cpuset configuration is considered
+ * unsupportable in general. E.g. a movable-only node which cannot satisfy
+ * any non-movable allocations (see update_nodemask). The page allocator
+ * needs to make additional checks for those configurations, and this
+ * check is meant to guard those checks without any overhead for sane
+ * configurations.
+ */
+static inline bool cpusets_insane_config(void)
+{
+ return static_branch_unlikely(&cpusets_insane_config_key);
+}
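The guard pattern this static key enables in allocator paths looks roughly like the following (sketch only; the slow-path helper is made up):

	static bool foo_allocation_allowed(gfp_t gfp_mask, nodemask_t *nodemask)
	{
		/* static branch: free of cost for sane configurations */
		if (!cpusets_insane_config())
			return true;

		/* pathological case, e.g. a movable-only node: do the
		 * expensive validation only here */
		return foo_check_movable_only_nodes(gfp_mask, nodemask);
	}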
+
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
@@ -167,6 +182,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
static inline bool cpusets_enabled(void) { return false; }
+static inline bool cpusets_insane_config(void) { return false; }
+
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 2618577a4d6d..620821549b23 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -8,8 +8,6 @@
#include <linux/pgtable.h>
#include <uapi/linux/vmcore.h>
-#include <linux/pgtable.h> /* for pgprot_t */
-
/* For IS_ENABLED(CONFIG_CRASH_DUMP) */
#define ELFCORE_ADDR_MAX (-1ULL)
#define ELFCORE_ADDR_ERR (-2ULL)
@@ -91,12 +89,32 @@ static inline void vmcore_unusable(void)
elfcorehdr_addr = ELFCORE_ADDR_ERR;
}
-#define HAVE_OLDMEM_PFN_IS_RAM 1
-extern int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn));
-extern void unregister_oldmem_pfn_is_ram(void);
+/**
+ * struct vmcore_cb - driver callbacks for /proc/vmcore handling
+ * @pfn_is_ram: check whether a PFN really is RAM and should be accessed when
+ * reading the vmcore. Will return "true" if it is RAM or if the
+ * callback cannot tell. If any callback returns "false", it's not
+ * RAM and the page must not be accessed; zeroes should be
+ * indicated in the vmcore instead. For example, a ballooned page
+ * contains no data and reading from such a page will cause high
+ * load in the hypervisor.
+ * @next: List head to manage registered callbacks internally; initialized by
+ * register_vmcore_cb().
+ *
+ * vmcore callbacks allow drivers managing physical memory ranges to
+ * coordinate with vmcore handling code, for example, to prevent accessing
+ * physical memory ranges that should not be accessed when reading the vmcore,
+ * although included in the vmcore header as memory ranges to dump.
+ */
+struct vmcore_cb {
+ bool (*pfn_is_ram)(struct vmcore_cb *cb, unsigned long pfn);
+ struct list_head next;
+};
+extern void register_vmcore_cb(struct vmcore_cb *cb);
+extern void unregister_vmcore_cb(struct vmcore_cb *cb);
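For illustration, a balloon-style driver would hook in along these lines (the driver names and the ballooned-page test are hypothetical):

	static bool foo_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
	{
		/* inflated pages contain no data: report "not RAM" so the
		 * vmcore exposes zeroes instead of touching the page */
		return !foo_pfn_is_ballooned(pfn);
	}

	static struct vmcore_cb foo_vmcore_cb = {
		.pfn_is_ram = foo_pfn_is_ram,
	};

	/* on probe: */  register_vmcore_cb(&foo_vmcore_cb);
	/* on remove: */ unregister_vmcore_cb(&foo_vmcore_cb);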
#else /* !CONFIG_CRASH_DUMP */
-static inline bool is_kdump_kernel(void) { return 0; }
+static inline bool is_kdump_kernel(void) { return false; }
#endif /* CONFIG_CRASH_DUMP */
/* Device Dump information to be filled by drivers */
diff --git a/include/linux/damon.h b/include/linux/damon.h
index d68b67b8d458..b4d4be3cc987 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -14,6 +14,8 @@
/* Minimal region size. Every damon_region is aligned by this. */
#define DAMON_MIN_REGION PAGE_SIZE
+/* Max priority score for DAMON-based operation schemes */
+#define DAMOS_MAX_SCORE (99)
/**
* struct damon_addr_range - Represents an address region of [@start, @end).
@@ -31,12 +33,22 @@ struct damon_addr_range {
* @sampling_addr: Address of the sample for the next access check.
* @nr_accesses: Access frequency of this region.
* @list: List head for siblings.
+ * @age: Age of this region.
+ *
+ * @age is initially zero, increased for each aggregation interval, and reset
+ * to zero again if the access frequency is significantly changed. If two
+ * regions are merged into a new region, both @nr_accesses and @age of the new
+ * region are set to the size-weighted average of those of the two regions.
*/
struct damon_region {
struct damon_addr_range ar;
unsigned long sampling_addr;
unsigned int nr_accesses;
struct list_head list;
+
+ unsigned int age;
+/* private: Internal value for age calculation. */
+ unsigned int last_nr_accesses;
};
/**
@@ -59,16 +71,180 @@ struct damon_target {
struct list_head list;
};
+/**
+ * enum damos_action - Represents an action of a Data Access Monitoring-based
+ * Operation Scheme.
+ *
+ * @DAMOS_WILLNEED: Call ``madvise()`` for the region with MADV_WILLNEED.
+ * @DAMOS_COLD: Call ``madvise()`` for the region with MADV_COLD.
+ * @DAMOS_PAGEOUT: Call ``madvise()`` for the region with MADV_PAGEOUT.
+ * @DAMOS_HUGEPAGE: Call ``madvise()`` for the region with MADV_HUGEPAGE.
+ * @DAMOS_NOHUGEPAGE: Call ``madvise()`` for the region with MADV_NOHUGEPAGE.
+ * @DAMOS_STAT: Do nothing but count the stat.
+ */
+enum damos_action {
+ DAMOS_WILLNEED,
+ DAMOS_COLD,
+ DAMOS_PAGEOUT,
+ DAMOS_HUGEPAGE,
+ DAMOS_NOHUGEPAGE,
+ DAMOS_STAT, /* Do nothing but only record the stat */
+};
+
+/**
+ * struct damos_quota - Controls the aggressiveness of the given scheme.
+ * @ms: Maximum milliseconds that the scheme can use.
+ * @sz: Maximum bytes of memory that the action can be applied.
+ * @reset_interval: Charge reset interval in milliseconds.
+ *
+ * @weight_sz: Weight of the region's size for prioritization.
+ * @weight_nr_accesses: Weight of the region's nr_accesses for prioritization.
+ * @weight_age: Weight of the region's age for prioritization.
+ *
+ * To avoid consuming too much CPU time or IO resources for applying the
+ * &struct damos->action to large memory, DAMON allows users to set time and/or
+ * size quotas. The quotas can be set by writing non-zero values to &ms and
+ * &sz, respectively. If the time quota is set, DAMON tries to use only up to
+ * &ms milliseconds within &reset_interval for applying the action. If the
+ * size quota is set, DAMON tries to apply the action only up to &sz bytes
+ * within &reset_interval.
+ *
+ * Internally, the time quota is transformed to a size quota using estimated
+ * throughput of the scheme's action. DAMON then compares it against &sz and
+ * uses the smaller one as the effective quota.
+ *
+ * For selecting regions within the quota, DAMON prioritizes the current scheme's
+ * target memory regions using the &struct damon_primitive->get_scheme_score.
+ * You could customize the prioritization logic by setting &weight_sz,
+ * &weight_nr_accesses, and &weight_age, because monitoring primitives are
+ * encouraged to respect those.
+ */
+struct damos_quota {
+ unsigned long ms;
+ unsigned long sz;
+ unsigned long reset_interval;
+
+ unsigned int weight_sz;
+ unsigned int weight_nr_accesses;
+ unsigned int weight_age;
+
+/* private: */
+ /* For throughput estimation */
+ unsigned long total_charged_sz;
+ unsigned long total_charged_ns;
+
+ unsigned long esz; /* Effective size quota in bytes */
+
+ /* For charging the quota */
+ unsigned long charged_sz;
+ unsigned long charged_from;
+ struct damon_target *charge_target_from;
+ unsigned long charge_addr_from;
+
+ /* For prioritization */
+ unsigned long histogram[DAMOS_MAX_SCORE + 1];
+ unsigned int min_score;
+};
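Tying the fields together, a quota that allows a scheme at most 10 ms of work or 100 MiB of applied memory per second, with prioritization dominated by region size, could be set up as follows (all values illustrative):

	struct damos_quota quota = {
		.ms = 10,			/* up to 10 ms of work ... */
		.sz = 100 * 1024 * 1024,	/* ... or 100 MiB applied ... */
		.reset_interval = 1000,		/* ... per 1000 ms window */
		.weight_sz = 60,
		.weight_nr_accesses = 20,
		.weight_age = 20,
	};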
+
+/**
+ * enum damos_wmark_metric - Represents the watermark metric.
+ *
+ * @DAMOS_WMARK_NONE: Ignore the watermarks of the given scheme.
+ * @DAMOS_WMARK_FREE_MEM_RATE: Free memory rate of the system in [0,1000].
+ */
+enum damos_wmark_metric {
+ DAMOS_WMARK_NONE,
+ DAMOS_WMARK_FREE_MEM_RATE,
+};
+
+/**
+ * struct damos_watermarks - Controls when a given scheme should be activated.
+ * @metric: Metric for the watermarks.
+ * @interval: Watermarks check time interval in microseconds.
+ * @high: High watermark.
+ * @mid: Middle watermark.
+ * @low: Low watermark.
+ *
+ * If &metric is &DAMOS_WMARK_NONE, the scheme is always active. Being active
+ * means DAMON monitors and applies the action of the scheme to appropriate
+ * memory regions. Otherwise, DAMON checks &metric of the system at least
+ * every &interval microseconds and works as below.
+ *
+ * If &metric is higher than &high, the scheme is inactivated. If &metric is
+ * between &mid and &low, the scheme is activated. If &metric is lower than
+ * &low, the scheme is inactivated.
+ */
+struct damos_watermarks {
+ enum damos_wmark_metric metric;
+ unsigned long interval;
+ unsigned long high;
+ unsigned long mid;
+ unsigned long low;
+
+/* private: */
+ bool activated;
+};
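For example, watermarks that keep a scheme dormant while memory is plentiful, activate it under moderate pressure, and back it off before reclaim takes over might look like this (thresholds illustrative; the free memory rate is in [0,1000]):

	struct damos_watermarks wmarks = {
		.metric = DAMOS_WMARK_FREE_MEM_RATE,
		.interval = 5000000,	/* check every 5 seconds */
		.high = 500,		/* inactive while > 50% is free */
		.mid = 100,		/* active once free memory <= 10% */
		.low = 50,		/* inactive again below 5% */
	};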
+
+/**
+ * struct damos - Represents a Data Access Monitoring-based Operation Scheme.
+ * @min_sz_region: Minimum size of target regions.
+ * @max_sz_region: Maximum size of target regions.
+ * @min_nr_accesses: Minimum ``->nr_accesses`` of target regions.
+ * @max_nr_accesses: Maximum ``->nr_accesses`` of target regions.
+ * @min_age_region: Minimum age of target regions.
+ * @max_age_region: Maximum age of target regions.
+ * @action: &damos_action to be applied to the target regions.
+ * @quota: Control the aggressiveness of this scheme.
+ * @wmarks: Watermarks for automated (in)activation of this scheme.
+ * @stat_count: Total number of regions to which this scheme was applied.
+ * @stat_sz: Total size of regions to which this scheme was applied.
+ * @list: List head for siblings.
+ *
+ * For each aggregation interval, DAMON finds regions which fit in the
+ * condition (&min_sz_region, &max_sz_region, &min_nr_accesses,
+ * &max_nr_accesses, &min_age_region, &max_age_region) and applies &action to
+ * those. To avoid consuming too much CPU time or IO resources for the
+ * &action, &quota is used.
+ *
+ * To do the work only when needed, schemes can be activated for specific
+ * system situations using &wmarks. If all schemes that are registered to a
+ * &struct damon_ctx are inactive, DAMON stops monitoring and just repeatedly
+ * checks the watermarks.
+ *
+ * After applying the &action to each region, &stat_count and &stat_sz are
+ * updated to reflect the number of regions and the total size of the regions
+ * to which the &action was applied.
+ */
+struct damos {
+ unsigned long min_sz_region;
+ unsigned long max_sz_region;
+ unsigned int min_nr_accesses;
+ unsigned int max_nr_accesses;
+ unsigned int min_age_region;
+ unsigned int max_age_region;
+ enum damos_action action;
+ struct damos_quota quota;
+ struct damos_watermarks wmarks;
+ unsigned long stat_count;
+ unsigned long stat_sz;
+ struct list_head list;
+};
+
struct damon_ctx;
/**
- * struct damon_primitive Monitoring primitives for given use cases.
+ * struct damon_primitive - Monitoring primitives for given use cases.
*
* @init: Initialize primitive-internal data structures.
* @update: Update primitive-internal data structures.
* @prepare_access_checks: Prepare next access check of target regions.
* @check_accesses: Check the accesses to target regions.
* @reset_aggregated: Reset aggregated accesses monitoring results.
+ * @get_scheme_score: Get the score of a region for a scheme.
+ * @apply_scheme: Apply a DAMON-based operation scheme.
* @target_valid: Determine if the target is valid.
* @cleanup: Clean up the context.
*
@@ -94,6 +270,11 @@ struct damon_ctx;
* of its update. The value will be used for regions adjustment threshold.
* @reset_aggregated should reset the access monitoring results that aggregated
* by @check_accesses.
+ * @get_scheme_score should return the priority score of a region for a scheme
+ * as an integer in [0, &DAMOS_MAX_SCORE].
+ * @apply_scheme is called from @kdamond when a region for a user-provided
+ * DAMON-based operation scheme is found. It should apply the scheme's action
+ * to the region. This is not used for the &DAMON_ARBITRARY_TARGET case.
* @target_valid should check whether the target is still valid for the
* monitoring.
* @cleanup is called from @kdamond just before its termination.
@@ -104,12 +285,17 @@ struct damon_primitive {
void (*prepare_access_checks)(struct damon_ctx *context);
unsigned int (*check_accesses)(struct damon_ctx *context);
void (*reset_aggregated)(struct damon_ctx *context);
+ int (*get_scheme_score)(struct damon_ctx *context,
+ struct damon_target *t, struct damon_region *r,
+ struct damos *scheme);
+ int (*apply_scheme)(struct damon_ctx *context, struct damon_target *t,
+ struct damon_region *r, struct damos *scheme);
bool (*target_valid)(void *target);
void (*cleanup)(struct damon_ctx *context);
};
-/*
- * struct damon_callback Monitoring events notification callbacks.
+/**
+ * struct damon_callback - Monitoring events notification callbacks.
*
* @before_start: Called before starting the monitoring.
* @after_sampling: Called after each sampling.
@@ -136,7 +322,7 @@ struct damon_callback {
int (*before_start)(struct damon_ctx *context);
int (*after_sampling)(struct damon_ctx *context);
int (*after_aggregation)(struct damon_ctx *context);
- int (*before_terminate)(struct damon_ctx *context);
+ void (*before_terminate)(struct damon_ctx *context);
};
/**
@@ -182,6 +368,7 @@ struct damon_callback {
* @min_nr_regions: The minimum number of adaptive monitoring regions.
* @max_nr_regions: The maximum number of adaptive monitoring regions.
* @adaptive_targets: Head of monitoring targets (&damon_target) list.
+ * @schemes: Head of schemes (&damos) list.
*/
struct damon_ctx {
unsigned long sample_interval;
@@ -194,7 +381,6 @@ struct damon_ctx {
/* public: */
struct task_struct *kdamond;
- bool kdamond_stop;
struct mutex kdamond_lock;
struct damon_primitive primitive;
@@ -203,6 +389,7 @@ struct damon_ctx {
unsigned long min_nr_regions;
unsigned long max_nr_regions;
struct list_head adaptive_targets;
+ struct list_head schemes;
};
#define damon_next_region(r) \
@@ -211,6 +398,9 @@ struct damon_ctx {
#define damon_prev_region(r) \
(container_of(r->list.prev, struct damon_region, list))
+#define damon_last_region(t) \
+ (list_last_entry(&t->regions_list, struct damon_region, list))
+
#define damon_for_each_region(r, t) \
list_for_each_entry(r, &t->regions_list, list)
@@ -223,6 +413,12 @@ struct damon_ctx {
#define damon_for_each_target_safe(t, next, ctx) \
list_for_each_entry_safe(t, next, &(ctx)->adaptive_targets, list)
+#define damon_for_each_scheme(s, ctx) \
+ list_for_each_entry(s, &(ctx)->schemes, list)
+
+#define damon_for_each_scheme_safe(s, next, ctx) \
+ list_for_each_entry_safe(s, next, &(ctx)->schemes, list)
+
#ifdef CONFIG_DAMON
struct damon_region *damon_new_region(unsigned long start, unsigned long end);
@@ -232,8 +428,18 @@ inline void damon_insert_region(struct damon_region *r,
void damon_add_region(struct damon_region *r, struct damon_target *t);
void damon_destroy_region(struct damon_region *r, struct damon_target *t);
+struct damos *damon_new_scheme(
+ unsigned long min_sz_region, unsigned long max_sz_region,
+ unsigned int min_nr_accesses, unsigned int max_nr_accesses,
+ unsigned int min_age_region, unsigned int max_age_region,
+ enum damos_action action, struct damos_quota *quota,
+ struct damos_watermarks *wmarks);
+void damon_add_scheme(struct damon_ctx *ctx, struct damos *s);
+void damon_destroy_scheme(struct damos *s);
+
struct damon_target *damon_new_target(unsigned long id);
void damon_add_target(struct damon_ctx *ctx, struct damon_target *t);
+bool damon_targets_empty(struct damon_ctx *ctx);
void damon_free_target(struct damon_target *t);
void damon_destroy_target(struct damon_target *t);
unsigned int damon_nr_regions(struct damon_target *t);
@@ -245,6 +451,8 @@ int damon_set_targets(struct damon_ctx *ctx,
int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
unsigned long aggr_int, unsigned long primitive_upd_int,
unsigned long min_nr_reg, unsigned long max_nr_reg);
+int damon_set_schemes(struct damon_ctx *ctx,
+ struct damos **schemes, ssize_t nr_schemes);
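Putting the pieces together (with quota and wmarks as sketched earlier; the constants are illustrative): create a scheme that pages out 4 KiB to 16 MiB regions left unaccessed for at least 100 aggregation intervals, then install it on a context:

	struct damos *scheme = damon_new_scheme(PAGE_SIZE, SZ_16M,
						0, 0, 100, UINT_MAX,
						DAMOS_PAGEOUT,
						&quota, &wmarks);
	int err = scheme ? damon_set_schemes(ctx, &scheme, 1) : -ENOMEM;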
int damon_nr_running_ctxs(void);
int damon_start(struct damon_ctx **ctxs, int nr_ctxs);
@@ -261,8 +469,26 @@ void damon_va_prepare_access_checks(struct damon_ctx *ctx);
unsigned int damon_va_check_accesses(struct damon_ctx *ctx);
bool damon_va_target_valid(void *t);
void damon_va_cleanup(struct damon_ctx *ctx);
+int damon_va_apply_scheme(struct damon_ctx *context, struct damon_target *t,
+ struct damon_region *r, struct damos *scheme);
+int damon_va_scheme_score(struct damon_ctx *context, struct damon_target *t,
+ struct damon_region *r, struct damos *scheme);
void damon_va_set_primitives(struct damon_ctx *ctx);
#endif /* CONFIG_DAMON_VADDR */
+#ifdef CONFIG_DAMON_PADDR
+
+/* Monitoring primitives for the physical memory address space */
+void damon_pa_prepare_access_checks(struct damon_ctx *ctx);
+unsigned int damon_pa_check_accesses(struct damon_ctx *ctx);
+bool damon_pa_target_valid(void *t);
+int damon_pa_apply_scheme(struct damon_ctx *context, struct damon_target *t,
+ struct damon_region *r, struct damos *scheme);
+int damon_pa_scheme_score(struct damon_ctx *context, struct damon_target *t,
+ struct damon_region *r, struct damos *scheme);
+void damon_pa_set_primitives(struct damon_ctx *ctx);
+
+#endif /* CONFIG_DAMON_PADDR */
+
#endif /* _DAMON_H */
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 2619d94c308d..8623caa67388 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -38,8 +38,6 @@ struct dax_operations {
int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
};
-extern struct attribute_group dax_attribute_group;
-
#if IS_ENABLED(CONFIG_DAX)
struct dax_device *alloc_dax(void *private, const char *host,
const struct dax_operations *ops, unsigned long flags);
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 3f49e65169c6..dbb409d77d4f 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -47,8 +47,6 @@ extern int debug_locks_off(void);
# define locking_selftest() do { } while (0)
#endif
-struct task_struct;
-
#ifdef CONFIG_LOCKDEP
extern void debug_show_all_locks(void);
extern void debug_show_held_locks(struct task_struct *task);
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
index 868e9eacd69e..9192986b1a73 100644
--- a/include/linux/decompress/mm.h
+++ b/include/linux/decompress/mm.h
@@ -25,13 +25,21 @@
#define STATIC_RW_DATA static
#endif
+/*
+ * When an architecture needs to share the malloc()/free() implementation
+ * between compilation units, the implementation needs non-local visibility.
+ */
+#ifndef MALLOC_VISIBLE
+#define MALLOC_VISIBLE static
+#endif
+
/* A trivial malloc implementation, adapted from
* malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
*/
STATIC_RW_DATA unsigned long malloc_ptr;
STATIC_RW_DATA int malloc_count;
-static void *malloc(int size)
+MALLOC_VISIBLE void *malloc(int size)
{
void *p;
@@ -52,7 +60,7 @@ static void *malloc(int size)
return p;
}
-static void free(void *where)
+MALLOC_VISIBLE void free(void *where)
{
malloc_count--;
if (!malloc_count)
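An architecture opting in would define the macro in the one decompressor unit that provides the allocator, roughly like this (a sketch; the file split is hypothetical):

	/* in the unit that owns the allocator: */
	#define MALLOC_VISIBLE	/* empty: give malloc()/free() global linkage */
	#include <linux/decompress/mm.h>

	/* other units of the same decompressor then just declare: */
	extern void *malloc(int size);
	extern void free(void *where);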
diff --git a/include/linux/delay.h b/include/linux/delay.h
index 1d0e2ce6b6d9..8eacf67eb212 100644
--- a/include/linux/delay.h
+++ b/include/linux/delay.h
@@ -19,7 +19,7 @@
* https://lists.openwall.net/linux-kernel/2011/01/09/56
*/
-#include <linux/kernel.h>
+#include <linux/math.h>
extern unsigned long loops_per_jiffy;
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 114553b487ef..a7df155ea49b 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -576,9 +576,9 @@ struct dm_table *dm_swap_table(struct mapped_device *md,
struct dm_table *t);
/*
- * Table keyslot manager functions
+ * Table blk_crypto_profile functions
*/
-void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm);
+void dm_destroy_crypto_profile(struct blk_crypto_profile *profile);
/*-----------------------------------------------------------------
* Macros.
diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h
index 062777a45a74..a039ab809753 100644
--- a/include/linux/device/bus.h
+++ b/include/linux/device/bus.h
@@ -143,6 +143,7 @@ int device_match_of_node(struct device *dev, const void *np);
int device_match_fwnode(struct device *dev, const void *fwnode);
int device_match_devt(struct device *dev, const void *pdevt);
int device_match_acpi_dev(struct device *dev, const void *adev);
+int device_match_acpi_handle(struct device *dev, const void *handle);
int device_match_any(struct device *dev, const void *unused);
/* iterator helpers for buses */
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 8b32b4bdd590..42a323a73c61 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -86,8 +86,8 @@ struct dma_buf_ops {
* @pin:
*
* This is called by dma_buf_pin() and lets the exporter know that the
- * DMA-buf can't be moved any more. The exporter should pin the buffer
- * into system memory to make sure it is generally accessible by other
+ * DMA-buf can't be moved any more. Ideally, the exporter should
+ * pin the buffer so that it is generally accessible by all
* devices.
*
* This is called with the &dmabuf.resv object locked and is mutual
@@ -420,6 +420,13 @@ struct dma_buf {
* - Dynamic importers should set fences for any access that they can't
* disable immediately from their &dma_buf_attach_ops.move_notify
* callback.
+ *
+ * IMPORTANT:
+ *
+ * All drivers must obey the struct dma_resv rules, specifically the
+ * rules for updating fences, see &dma_resv.fence_excl and
+ * &dma_resv.fence. If these dependency rules are broken access tracking
+ * can be lost resulting in use after free issues.
*/
struct dma_resv *resv;
@@ -433,7 +440,7 @@ struct dma_buf {
wait_queue_head_t *poll;
__poll_t active;
- } cb_excl, cb_shared;
+ } cb_in, cb_out;
#ifdef CONFIG_DMABUF_SYSFS_STATS
/**
* @sysfs_entry:
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 6ffb4b2c6371..a706b7bf51d7 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -214,19 +214,15 @@ struct dma_fence_ops {
* Custom wait implementation, defaults to dma_fence_default_wait() if
* not set.
*
- * The dma_fence_default_wait implementation should work for any fence, as long
- * as @enable_signaling works correctly. This hook allows drivers to
- * have an optimized version for the case where a process context is
- * already available, e.g. if @enable_signaling for the general case
- * needs to set up a worker thread.
+ * Deprecated and should not be used by new implementations. Only used
+ * by existing implementations which need special handling for their
+ * hardware reset procedure.
*
* Must return -ERESTARTSYS if the wait is intr = true and the wait was
* interrupted, and remaining jiffies if fence has signaled, or 0 if wait
* timed out. Can also return other error values on custom implementations,
* which should be treated as if the fence is signaled. For example a hardware
* lockup could be reported like that.
- *
- * This callback is optional.
*/
signed long (*wait)(struct dma_fence *fence,
bool intr, signed long timeout);
@@ -590,26 +586,4 @@ struct dma_fence *dma_fence_get_stub(void);
struct dma_fence *dma_fence_allocate_private_stub(void);
u64 dma_fence_context_alloc(unsigned num);
-#define DMA_FENCE_TRACE(f, fmt, args...) \
- do { \
- struct dma_fence *__ff = (f); \
- if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \
- pr_info("f %llu#%llu: " fmt, \
- __ff->context, __ff->seqno, ##args); \
- } while (0)
-
-#define DMA_FENCE_WARN(f, fmt, args...) \
- do { \
- struct dma_fence *__ff = (f); \
- pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\
- ##args); \
- } while (0)
-
-#define DMA_FENCE_ERR(f, fmt, args...) \
- do { \
- struct dma_fence *__ff = (f); \
- pr_err("f %llu#%llu: " fmt, __ff->context, __ff->seqno, \
- ##args); \
- } while (0)
-
#endif /* __LINUX_DMA_FENCE_H */
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index e1ca2080a1ff..dbd235ab447f 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -62,19 +62,211 @@ struct dma_resv_list {
/**
* struct dma_resv - a reservation object manages fences for a buffer
- * @lock: update side lock
- * @seq: sequence count for managing RCU read-side synchronization
- * @fence_excl: the exclusive fence, if there is one currently
- * @fence: list of current shared fences
+ *
+ * There are multiple uses for this, with sometimes slightly different rules in
+ * how the fence slots are used.
+ *
+ * One use is to synchronize cross-driver access to a struct dma_buf, either for
+ * dynamic buffer management or just to handle implicit synchronization between
+ * different users of the buffer in userspace. See &dma_buf.resv for a more
+ * in-depth discussion.
+ *
+ * The other major use is to manage access and locking within a driver in a
+ * buffer based memory manager. struct ttm_buffer_object is the canonical
+ * example here, since this is where reservation objects originated from. But
+ * use in drivers is spreading and some drivers also manage struct
+ * drm_gem_object with the same scheme.
*/
struct dma_resv {
+ /**
+ * @lock:
+ *
+ * Update side lock. Don't use directly, instead use the wrapper
+ * functions like dma_resv_lock() and dma_resv_unlock().
+ *
+ * Drivers which use the reservation object to manage memory dynamically
+ * also use this lock to protect buffer object state like placement and
+ * allocation policies, and hold it throughout command submission.
+ */
struct ww_mutex lock;
+
+ /**
+ * @seq:
+ *
+ * Sequence count for managing RCU read-side synchronization, allows
+ * read-only access to @fence_excl and @fence while ensuring we take a
+ * consistent snapshot.
+ */
seqcount_ww_mutex_t seq;
+ /**
+ * @fence_excl:
+ *
+ * The exclusive fence, if there is one currently.
+ *
+ * There are two ways to update this fence:
+ *
+ * - First by calling dma_resv_add_excl_fence(), which replaces all
+ * fences attached to the reservation object. To guarantee that no
+ * fences are lost, this new fence must signal only after all previous
+ * fences, both shared and exclusive, have signalled. In some cases it
+ * is convenient to achieve that by attaching a struct dma_fence_array
+ * with all the new and old fences.
+ *
+ * - Alternatively the fence can be set directly, which leaves the
+ * shared fences unchanged. To guarantee that no fences are lost, this
+ * new fence must signal only after the previous exclusive fence has
+ * signalled. Since the shared fences are staying intact, it is not
+ * necessary to maintain any ordering against those. If semantically
+ * only a new access is added without actually treating the previous
+ * one as a dependency the exclusive fences can be strung together
+ * using struct dma_fence_chain.
+ *
+ * Note that the actual semantics of what an exclusive or shared fence
+ * means are defined by the user; for reservation objects shared across
+ * drivers see &dma_buf.resv.
+ */
struct dma_fence __rcu *fence_excl;
+
+ /**
+ * @fence:
+ *
+ * List of current shared fences.
+ *
+ * There are no ordering constraints of shared fences against the
+ * exclusive fence slot. If a waiter needs to wait for all access, it
+ * has to wait for both sets of fences to signal.
+ *
+ * A new fence is added by calling dma_resv_add_shared_fence(). Since
+ * this often needs to be done past the point of no return in command
+ * submission it cannot fail, and therefore sufficient slots need to be
+ * reserved by calling dma_resv_reserve_shared().
+ *
+ * Note that the actual semantics of what an exclusive or shared fence
+ * means are defined by the user; for reservation objects shared across
+ * drivers see &dma_buf.resv.
+ */
struct dma_resv_list __rcu *fence;
};
+/**
+ * struct dma_resv_iter - current position into the dma_resv fences
+ *
+ * Don't touch this directly in the driver; use the accessor functions instead.
+ */
+struct dma_resv_iter {
+ /** @obj: The dma_resv object we iterate over */
+ struct dma_resv *obj;
+
+ /** @all_fences: If all fences should be returned */
+ bool all_fences;
+
+ /** @fence: the currently handled fence */
+ struct dma_fence *fence;
+
+ /** @seq: sequence number to check for modifications */
+ unsigned int seq;
+
+ /** @index: index into the shared fences */
+ unsigned int index;
+
+ /** @fences: the shared fences; private, *MUST* not dereference */
+ struct dma_resv_list *fences;
+
+ /** @shared_count: number of shared fences */
+ unsigned int shared_count;
+
+ /** @is_restarted: true if this is the first returned fence */
+ bool is_restarted;
+};
+
+struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor);
+
+/**
+ * dma_resv_iter_begin - initialize a dma_resv_iter object
+ * @cursor: The dma_resv_iter object to initialize
+ * @obj: The dma_resv object which we want to iterate over
+ * @all_fences: If all fences should be returned or just the exclusive one
+ */
+static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
+ struct dma_resv *obj,
+ bool all_fences)
+{
+ cursor->obj = obj;
+ cursor->all_fences = all_fences;
+ cursor->fence = NULL;
+}
+
+/**
+ * dma_resv_iter_end - cleanup a dma_resv_iter object
+ * @cursor: the dma_resv_iter object which should be cleaned up
+ *
+ * Make sure that the reference to the fence in the cursor is properly
+ * dropped.
+ */
+static inline void dma_resv_iter_end(struct dma_resv_iter *cursor)
+{
+ dma_fence_put(cursor->fence);
+}
+
+/**
+ * dma_resv_iter_is_exclusive - test if the current fence is the exclusive one
+ * @cursor: the cursor of the current position
+ *
+ * Returns true if the currently returned fence is the exclusive one.
+ */
+static inline bool dma_resv_iter_is_exclusive(struct dma_resv_iter *cursor)
+{
+ return cursor->index == 0;
+}
+
+/**
+ * dma_resv_iter_is_restarted - test if this is the first fence after a restart
+ * @cursor: the cursor with the current position
+ *
+ * Return true if this is the first fence in an iteration after a restart.
+ */
+static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
+{
+ return cursor->is_restarted;
+}
+
+/**
+ * dma_resv_for_each_fence_unlocked - unlocked fence iterator
+ * @cursor: a struct dma_resv_iter pointer
+ * @fence: the current fence
+ *
+ * Iterate over the fences in a struct dma_resv object without holding the
+ * &dma_resv.lock and using RCU instead. The cursor needs to be initialized
+ * with dma_resv_iter_begin() and cleaned up with dma_resv_iter_end(). Inside
+ * the iterator a reference to the dma_fence is held and the RCU lock dropped.
+ * When the dma_resv is modified the iteration starts over again.
+ */
+#define dma_resv_for_each_fence_unlocked(cursor, fence) \
+ for (fence = dma_resv_iter_first_unlocked(cursor); \
+ fence; fence = dma_resv_iter_next_unlocked(cursor))
+
+/**
+ * dma_resv_for_each_fence - fence iterator
+ * @cursor: a struct dma_resv_iter pointer
+ * @obj: a dma_resv object pointer
+ * @all_fences: true if all fences should be returned
+ * @fence: the current fence
+ *
+ * Iterate over the fences in a struct dma_resv object while holding the
+ * &dma_resv.lock. @all_fences controls if the shared fences are returned as
+ * well. The cursor initialisation is part of the iterator, and the fence stays
+ * valid as long as the lock is held, so no extra reference to the fence is
+ * taken.
+ */
+#define dma_resv_for_each_fence(cursor, obj, all_fences, fence) \
+ for (dma_resv_iter_begin(cursor, obj, all_fences), \
+ fence = dma_resv_iter_first(cursor); fence; \
+ fence = dma_resv_iter_next(cursor))
+
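A usage sketch of the unlocked variant, with obj being some &struct dma_resv the caller holds a reference on:

	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, true);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (dma_resv_iter_is_restarted(&cursor)) {
			/* the object changed under us: drop any state
			 * collected from previously returned fences */
		}
		/* a reference on fence is held for the loop body */
	}
	dma_resv_iter_end(&cursor);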
#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
@@ -98,6 +290,13 @@ static inline void dma_resv_reset_shared_max(struct dma_resv *obj) {}
* undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
* is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
* object may be locked by itself by passing NULL as @ctx.
+ *
+ * When a die situation is indicated by returning -EDEADLK all locks held by
+ * @ctx must be unlocked and then dma_resv_lock_slow() called on @obj.
+ *
+ * Unlocked by calling dma_resv_unlock().
+ *
+ * See also dma_resv_lock_interruptible() for the interruptible variant.
*/
static inline int dma_resv_lock(struct dma_resv *obj,
struct ww_acquire_ctx *ctx)
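At a call site, the documented -EDEADLK dance then looks roughly like this (sketch; unlocking of the other objects held under ctx is elided):

	ret = dma_resv_lock(obj, ctx);
	if (ret == -EDEADLK) {
		/* drop every other lock held under ctx first ... */
		dma_resv_lock_slow(obj, ctx);
		/* ... then re-take the dropped locks and retry */
	}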
@@ -119,6 +318,12 @@ static inline int dma_resv_lock(struct dma_resv *obj,
* undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
* is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
* object may be locked by itself by passing NULL as @ctx.
+ *
+ * When a die situation is indicated by returning -EDEADLK all locks held by
+ * @ctx must be unlocked and then dma_resv_lock_slow_interruptible() called on
+ * @obj.
+ *
+ * Unlocked by calling dma_resv_unlock().
*/
static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
struct ww_acquire_ctx *ctx)
@@ -134,6 +339,8 @@ static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
* Acquires the reservation object after a die case. This function
* will sleep until the lock becomes available. See dma_resv_lock() as
* well.
+ *
+ * See also dma_resv_lock_slow_interruptible() for the interruptible variant.
*/
static inline void dma_resv_lock_slow(struct dma_resv *obj,
struct ww_acquire_ctx *ctx)
@@ -167,13 +374,13 @@ static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
* if they overlap with a writer.
*
* Also note that since no context is provided, no deadlock protection is
- * possible.
+ * possible, which is also not needed for a trylock.
*
* Returns true if the lock was acquired, false otherwise.
*/
static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
{
- return ww_mutex_trylock(&obj->lock);
+ return ww_mutex_trylock(&obj->lock, NULL);
}
/**
@@ -193,6 +400,11 @@ static inline bool dma_resv_is_locked(struct dma_resv *obj)
*
* Returns the context used to lock a reservation object or NULL if no context
* was used or the object is not locked at all.
+ *
+ * WARNING: This interface is pretty horrible, but TTM needs it because it
+ * doesn't pass the struct ww_acquire_ctx around in some very long callchains.
+ * Everyone else just uses it to check whether they're holding a reservation or
+ * not.
*/
static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
{
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index e5c2c9e71bf1..9000f3ffce8b 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -944,10 +944,8 @@ struct dma_device {
void (*device_issue_pending)(struct dma_chan *chan);
void (*device_release)(struct dma_device *dev);
/* debugfs support */
-#ifdef CONFIG_DEBUG_FS
void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev);
struct dentry *dbg_dev_root;
-#endif
};
static inline int dmaengine_slave_config(struct dma_chan *chan,
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index e04436a7ff27..45e903d84733 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -131,6 +131,14 @@ static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg)
return 0;
}
+#ifdef CONFIG_DMAR_DEBUG
+void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
+ unsigned long long addr, u32 pasid);
+#else
+static inline void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
+ unsigned long long addr, u32 pasid) {}
+#endif
+
#ifdef CONFIG_INTEL_IOMMU
extern int iommu_detected, no_iommu;
extern int intel_iommu_init(void);
diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h
index c7fa4a3498fe..254b165f2b44 100644
--- a/include/linux/dsa/8021q.h
+++ b/include/linux/dsa/8021q.h
@@ -9,6 +9,7 @@
#include <linux/types.h>
struct dsa_switch;
+struct dsa_port;
struct sk_buff;
struct net_device;
@@ -45,9 +46,9 @@ void dsa_tag_8021q_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port,
u16 dsa_8021q_bridge_tx_fwd_offload_vid(int bridge_num);
-u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port);
+u16 dsa_tag_8021q_tx_vid(const struct dsa_port *dp);
-u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port);
+u16 dsa_tag_8021q_rx_vid(const struct dsa_port *dp);
int dsa_8021q_rx_switch_id(u16 vid);
diff --git a/include/linux/dsa/ocelot.h b/include/linux/dsa/ocelot.h
index 8ae999f587c4..7ee708ad7df2 100644
--- a/include/linux/dsa/ocelot.h
+++ b/include/linux/dsa/ocelot.h
@@ -12,6 +12,7 @@
struct ocelot_skb_cb {
struct sk_buff *clone;
unsigned int ptp_class; /* valid only for clones */
+ u32 tstamp_lo;
u8 ptp_cmd;
u8 ts_id;
};
@@ -242,9 +243,9 @@ static inline void ocelot_ifh_set_tag_type(void *injection, u64 tag_type)
packing(injection, &tag_type, 16, 16, OCELOT_TAG_LEN, PACK, 0);
}
-static inline void ocelot_ifh_set_vid(void *injection, u64 vid)
+static inline void ocelot_ifh_set_vlan_tci(void *injection, u64 vlan_tci)
{
- packing(injection, &vid, 11, 0, OCELOT_TAG_LEN, PACK, 0);
+ packing(injection, &vlan_tci, 15, 0, OCELOT_TAG_LEN, PACK, 0);
}
/* Determine the PTP REW_OP to use for injecting the given skb */
diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h
index 9e07079528a5..e6c78be40bde 100644
--- a/include/linux/dsa/sja1105.h
+++ b/include/linux/dsa/sja1105.h
@@ -69,7 +69,6 @@ struct sja1105_port {
struct kthread_work xmit_work;
struct sk_buff_head xmit_queue;
struct sja1105_tagger_data *data;
- struct dsa_port *dp;
bool hwts_tx_en;
};
diff --git a/include/linux/dtpm.h b/include/linux/dtpm.h
index e80a332e3d8a..2890f6370eb9 100644
--- a/include/linux/dtpm.h
+++ b/include/linux/dtpm.h
@@ -23,34 +23,32 @@ struct dtpm {
u64 power_max;
u64 power_min;
int weight;
- void *private;
};
struct dtpm_ops {
u64 (*set_power_uw)(struct dtpm *, u64);
u64 (*get_power_uw)(struct dtpm *);
+ int (*update_power_uw)(struct dtpm *);
void (*release)(struct dtpm *);
};
-struct dtpm_descr;
-
-typedef int (*dtpm_init_t)(struct dtpm_descr *);
+typedef int (*dtpm_init_t)(void);
struct dtpm_descr {
- struct dtpm *parent;
- const char *name;
dtpm_init_t init;
};
/* Init section thermal table */
-extern struct dtpm_descr *__dtpm_table[];
-extern struct dtpm_descr *__dtpm_table_end[];
+extern struct dtpm_descr __dtpm_table[];
+extern struct dtpm_descr __dtpm_table_end[];
-#define DTPM_TABLE_ENTRY(name) \
- static typeof(name) *__dtpm_table_entry_##name \
- __used __section("__dtpm_table") = &name
+#define DTPM_TABLE_ENTRY(name, __init) \
+ static struct dtpm_descr __dtpm_table_entry_##name \
+ __used __section("__dtpm_table") = { \
+ .init = __init, \
+ }
-#define DTPM_DECLARE(name) DTPM_TABLE_ENTRY(name)
+#define DTPM_DECLARE(name, init) DTPM_TABLE_ENTRY(name, init)
#define for_each_dtpm_table(__dtpm) \
for (__dtpm = __dtpm_table; \
@@ -62,11 +60,11 @@ static inline struct dtpm *to_dtpm(struct powercap_zone *zone)
return container_of(zone, struct dtpm, zone);
}
-int dtpm_update_power(struct dtpm *dtpm, u64 power_min, u64 power_max);
+int dtpm_update_power(struct dtpm *dtpm);
int dtpm_release_zone(struct powercap_zone *pcz);
-struct dtpm *dtpm_alloc(struct dtpm_ops *ops);
+void dtpm_init(struct dtpm *dtpm, struct dtpm_ops *ops);
void dtpm_unregister(struct dtpm *dtpm);
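With the descriptor now table-allocated, registration becomes a one-liner; a hedged sketch (names hypothetical), where the init callback takes no arguments and is expected to set up and register its own dtpm nodes:

	static int __init foo_dtpm_setup(void)
	{
		/* allocate a struct dtpm, call dtpm_init(&d, &foo_ops),
		 * then register it with the powercap framework */
		return 0;
	}
	DTPM_DECLARE(foo_dtpm, foo_dtpm_setup);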
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 6b5d36babfcc..dbd39b20e034 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -362,6 +362,7 @@ void efi_native_runtime_setup(void);
/* OEM GUIDs */
#define DELLEMC_EFI_RCI2_TABLE_GUID EFI_GUID(0x2d9f28a2, 0xa886, 0x456a, 0x97, 0xa8, 0xf1, 0x1e, 0xf2, 0x4f, 0xf4, 0x55)
+#define AMD_SEV_MEM_ENCRYPT_GUID EFI_GUID(0x0cf29b71, 0x9e51, 0x433a, 0xa3, 0xb7, 0x81, 0xf3, 0xab, 0x16, 0xb8, 0x75)
typedef struct {
efi_guid_t guid;
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
deleted file mode 100644
index ef9ceead3db1..000000000000
--- a/include/linux/elevator.h
+++ /dev/null
@@ -1,181 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_ELEVATOR_H
-#define _LINUX_ELEVATOR_H
-
-#include <linux/percpu.h>
-#include <linux/hashtable.h>
-
-#ifdef CONFIG_BLOCK
-
-struct io_cq;
-struct elevator_type;
-#ifdef CONFIG_BLK_DEBUG_FS
-struct blk_mq_debugfs_attr;
-#endif
-
-/*
- * Return values from elevator merger
- */
-enum elv_merge {
- ELEVATOR_NO_MERGE = 0,
- ELEVATOR_FRONT_MERGE = 1,
- ELEVATOR_BACK_MERGE = 2,
- ELEVATOR_DISCARD_MERGE = 3,
-};
-
-struct blk_mq_alloc_data;
-struct blk_mq_hw_ctx;
-
-struct elevator_mq_ops {
- int (*init_sched)(struct request_queue *, struct elevator_type *);
- void (*exit_sched)(struct elevator_queue *);
- int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
- void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
- void (*depth_updated)(struct blk_mq_hw_ctx *);
-
- bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
- bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int);
- int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
- void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
- void (*requests_merged)(struct request_queue *, struct request *, struct request *);
- void (*limit_depth)(unsigned int, struct blk_mq_alloc_data *);
- void (*prepare_request)(struct request *);
- void (*finish_request)(struct request *);
- void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
- struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
- bool (*has_work)(struct blk_mq_hw_ctx *);
- void (*completed_request)(struct request *, u64);
- void (*requeue_request)(struct request *);
- struct request *(*former_request)(struct request_queue *, struct request *);
- struct request *(*next_request)(struct request_queue *, struct request *);
- void (*init_icq)(struct io_cq *);
- void (*exit_icq)(struct io_cq *);
-};
-
-#define ELV_NAME_MAX (16)
-
-struct elv_fs_entry {
- struct attribute attr;
- ssize_t (*show)(struct elevator_queue *, char *);
- ssize_t (*store)(struct elevator_queue *, const char *, size_t);
-};
-
-/*
- * identifies an elevator type, such as AS or deadline
- */
-struct elevator_type
-{
- /* managed by elevator core */
- struct kmem_cache *icq_cache;
-
- /* fields provided by elevator implementation */
- struct elevator_mq_ops ops;
-
- size_t icq_size; /* see iocontext.h */
- size_t icq_align; /* ditto */
- struct elv_fs_entry *elevator_attrs;
- const char *elevator_name;
- const char *elevator_alias;
- const unsigned int elevator_features;
- struct module *elevator_owner;
-#ifdef CONFIG_BLK_DEBUG_FS
- const struct blk_mq_debugfs_attr *queue_debugfs_attrs;
- const struct blk_mq_debugfs_attr *hctx_debugfs_attrs;
-#endif
-
- /* managed by elevator core */
- char icq_cache_name[ELV_NAME_MAX + 6]; /* elvname + "_io_cq" */
- struct list_head list;
-};
-
-#define ELV_HASH_BITS 6
-
-void elv_rqhash_del(struct request_queue *q, struct request *rq);
-void elv_rqhash_add(struct request_queue *q, struct request *rq);
-void elv_rqhash_reposition(struct request_queue *q, struct request *rq);
-struct request *elv_rqhash_find(struct request_queue *q, sector_t offset);
-
-/*
- * each queue has an elevator_queue associated with it
- */
-struct elevator_queue
-{
- struct elevator_type *type;
- void *elevator_data;
- struct kobject kobj;
- struct mutex sysfs_lock;
- unsigned int registered:1;
- DECLARE_HASHTABLE(hash, ELV_HASH_BITS);
-};
-
-/*
- * block elevator interface
- */
-extern enum elv_merge elv_merge(struct request_queue *, struct request **,
- struct bio *);
-extern void elv_merge_requests(struct request_queue *, struct request *,
- struct request *);
-extern void elv_merged_request(struct request_queue *, struct request *,
- enum elv_merge);
-extern bool elv_attempt_insert_merge(struct request_queue *, struct request *,
- struct list_head *);
-extern struct request *elv_former_request(struct request_queue *, struct request *);
-extern struct request *elv_latter_request(struct request_queue *, struct request *);
-void elevator_init_mq(struct request_queue *q);
-
-/*
- * io scheduler registration
- */
-extern int elv_register(struct elevator_type *);
-extern void elv_unregister(struct elevator_type *);
-
-/*
- * io scheduler sysfs switching
- */
-extern ssize_t elv_iosched_show(struct request_queue *, char *);
-extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
-
-extern bool elv_bio_merge_ok(struct request *, struct bio *);
-extern struct elevator_queue *elevator_alloc(struct request_queue *,
- struct elevator_type *);
-
-/*
- * Helper functions.
- */
-extern struct request *elv_rb_former_request(struct request_queue *, struct request *);
-extern struct request *elv_rb_latter_request(struct request_queue *, struct request *);
-
-/*
- * rb support functions.
- */
-extern void elv_rb_add(struct rb_root *, struct request *);
-extern void elv_rb_del(struct rb_root *, struct request *);
-extern struct request *elv_rb_find(struct rb_root *, sector_t);
-
-/*
- * Insertion selection
- */
-#define ELEVATOR_INSERT_FRONT 1
-#define ELEVATOR_INSERT_BACK 2
-#define ELEVATOR_INSERT_SORT 3
-#define ELEVATOR_INSERT_REQUEUE 4
-#define ELEVATOR_INSERT_FLUSH 5
-#define ELEVATOR_INSERT_SORT_MERGE 6
-
-#define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
-#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)
-
-#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
-#define rq_fifo_clear(rq) list_del_init(&(rq)->queuelist)
-
-/*
- * Elevator features.
- */
-
-/* Supports zoned block devices sequential write constraint */
-#define ELEVATOR_F_ZBD_SEQ_WRITE (1U << 0)
-/* Supports scheduling on multiple hardware queues */
-#define ELEVATOR_F_MQ_AWARE (1U << 1)
-
-#endif /* CONFIG_BLOCK */
-#endif
diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
index 2aaa15779d50..957ebec35aad 100644
--- a/include/linux/elfcore.h
+++ b/include/linux/elfcore.h
@@ -109,7 +109,7 @@ static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_reg
#endif
}
-#if defined(CONFIG_UM) || defined(CONFIG_IA64)
+#if (defined(CONFIG_UML) && defined(CONFIG_X86_32)) || defined(CONFIG_IA64)
/*
* These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out
* extra segments containing the gate DSO contents. Dumping its
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
index 39dcadd492b5..6377adc3b78d 100644
--- a/include/linux/energy_model.h
+++ b/include/linux/energy_model.h
@@ -17,19 +17,30 @@
* device). It can be a total power: static and dynamic.
* @cost: The cost coefficient associated with this level, used during
* energy calculation. Equal to: power * max_frequency / frequency
+ * @flags: see "em_perf_state flags" description below.
*/
struct em_perf_state {
unsigned long frequency;
unsigned long power;
unsigned long cost;
+ unsigned long flags;
};
+/*
+ * em_perf_state flags:
+ *
+ * EM_PERF_STATE_INEFFICIENT: The performance state is inefficient: another
+ * performance state in this em_perf_domain has a higher frequency but a
+ * lower or equal power cost. Such inefficient states are ignored when
+ * using the em_pd_get_efficient_*() functions.
+ */
+#define EM_PERF_STATE_INEFFICIENT BIT(0)
+
/**
* struct em_perf_domain - Performance domain
* @table: List of performance states, in ascending order
* @nr_perf_states: Number of performance states
- * @milliwatts: Flag indicating the power values are in milli-Watts
- * or some other scale.
+ * @flags: See "em_perf_domain flags"
* @cpus: Cpumask covering the CPUs of the domain. It's here
* for performance reasons to avoid potential cache
* misses during energy calculations in the scheduler
@@ -44,10 +55,22 @@ struct em_perf_state {
struct em_perf_domain {
struct em_perf_state *table;
int nr_perf_states;
- int milliwatts;
+ unsigned long flags;
unsigned long cpus[];
};
+/*
+ * em_perf_domain flags:
+ *
+ * EM_PERF_DOMAIN_MILLIWATTS: The power values are in milli-Watts or some
+ * other scale.
+ *
+ * EM_PERF_DOMAIN_SKIP_INEFFICIENCIES: Skip inefficient states when estimating
+ * energy consumption.
+ */
+#define EM_PERF_DOMAIN_MILLIWATTS BIT(0)
+#define EM_PERF_DOMAIN_SKIP_INEFFICIENCIES BIT(1)
+
#define em_span_cpus(em) (to_cpumask((em)->cpus))
#ifdef CONFIG_ENERGY_MODEL
@@ -102,6 +125,37 @@ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
void em_dev_unregister_perf_domain(struct device *dev);
/**
+ * em_pd_get_efficient_state() - Get an efficient performance state from the EM
+ * @pd : Performance domain for which we want an efficient frequency
+ * @freq : Frequency to map with the EM
+ *
+ * It is called from the scheduler code quite frequently and, as a
+ * consequence, does not implement any checks.
+ *
+ * Return: An efficient performance state, high enough to meet @freq
+ * requirement.
+ */
+static inline
+struct em_perf_state *em_pd_get_efficient_state(struct em_perf_domain *pd,
+ unsigned long freq)
+{
+ struct em_perf_state *ps;
+ int i;
+
+ for (i = 0; i < pd->nr_perf_states; i++) {
+ ps = &pd->table[i];
+ if (ps->frequency >= freq) {
+ if (pd->flags & EM_PERF_DOMAIN_SKIP_INEFFICIENCIES &&
+ ps->flags & EM_PERF_STATE_INEFFICIENT)
+ continue;
+ break;
+ }
+ }
+
+ return ps;
+}
+
+/**
* em_cpu_energy() - Estimates the energy consumed by the CPUs of a
* performance domain
* @pd : performance domain for which energy has to be estimated
@@ -123,7 +177,7 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
{
unsigned long freq, scale_cpu;
struct em_perf_state *ps;
- int i, cpu;
+ int cpu;
if (!sum_util)
return 0;
@@ -148,11 +202,7 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
* Find the lowest performance state of the Energy Model above the
* requested frequency.
*/
- for (i = 0; i < pd->nr_perf_states; i++) {
- ps = &pd->table[i];
- if (ps->frequency >= freq)
- break;
- }
+ ps = em_pd_get_efficient_state(pd, freq);
/*
* The capacity of a CPU in the domain at the performance state (ps)
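Editorial note: the two flags interact; EM_PERF_STATE_INEFFICIENT on a state only takes effect when the domain also sets EM_PERF_DOMAIN_SKIP_INEFFICIENCIES. For illustration, a hedged sketch of how a registration-time pass could derive the per-state flag from the cost column; this is not the patch's own marking code:

/* Illustrative: with the table sorted by ascending frequency, a state
 * is inefficient if any later (higher-frequency) state costs no more. */
static void mark_inefficient_states(struct em_perf_domain *pd)
{
        int i, j;

        for (i = 0; i < pd->nr_perf_states; i++) {
                for (j = i + 1; j < pd->nr_perf_states; j++) {
                        if (pd->table[j].cost <= pd->table[i].cost) {
                                pd->table[i].flags |= EM_PERF_STATE_INEFFICIENT;
                                break;
                        }
                }
        }
}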
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index c58d50451485..2ad71cc90b37 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -26,9 +26,16 @@
#ifdef __KERNEL__
struct device;
+struct fwnode_handle;
+
int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
+int platform_get_ethdev_address(struct device *dev, struct net_device *netdev);
unsigned char *arch_get_platform_mac_address(void);
int nvmem_get_mac_address(struct device *dev, void *addrbuf);
+int device_get_mac_address(struct device *dev, char *addr);
+int device_get_ethdev_address(struct device *dev, struct net_device *netdev);
+int fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr);
+
u32 eth_get_headlen(const struct net_device *dev, const void *data, u32 len);
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
extern const struct header_ops eth_header_ops;
@@ -227,8 +234,6 @@ static inline void eth_random_addr(u8 *addr)
addr[0] |= 0x02; /* set local assignment bit (IEEE802) */
}
-#define random_ether_addr(addr) eth_random_addr(addr)
-
/**
* eth_broadcast_addr - Assign broadcast address
* @addr: Pointer to a six-byte array containing the Ethernet address
@@ -262,8 +267,11 @@ static inline void eth_zero_addr(u8 *addr)
*/
static inline void eth_hw_addr_random(struct net_device *dev)
{
+ u8 addr[ETH_ALEN];
+
+ eth_random_addr(addr);
+ __dev_addr_set(dev, addr, ETH_ALEN);
dev->addr_assign_type = NET_ADDR_RANDOM;
- eth_random_addr(dev->dev_addr);
}
/**
@@ -323,7 +331,7 @@ static inline void eth_hw_addr_inherit(struct net_device *dst,
struct net_device *src)
{
dst->addr_assign_type = src->addr_assign_type;
- ether_addr_copy(dst->dev_addr, src->dev_addr);
+ eth_hw_addr_set(dst, src->dev_addr);
}
/**
@@ -544,6 +552,27 @@ static inline unsigned long compare_ether_header(const void *a, const void *b)
}
/**
+ * eth_hw_addr_gen - Generate and assign Ethernet address to a port
+ * @dev: pointer to port's net_device structure
+ * @base_addr: base Ethernet address
+ * @id: offset to add to the base address
+ *
+ * Generate a MAC address using a base address and an offset and assign it
+ * to a net_device. Commonly used by switch drivers which need to compute
+ * addresses for all their ports. addr_assign_type is not changed.
+ */
+static inline void eth_hw_addr_gen(struct net_device *dev, const u8 *base_addr,
+ unsigned int id)
+{
+ u64 u = ether_addr_to_u64(base_addr);
+ u8 addr[ETH_ALEN];
+
+ u += id;
+ u64_to_ether_addr(u, addr);
+ eth_hw_addr_set(dev, addr);
+}
+
+/**
 * eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame
* @skb: Buffer to pad
*
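Editorial note: eth_hw_addr_gen() is a fold-to-u64, add, unfold sequence around eth_hw_addr_set(). A standalone userspace restatement of that arithmetic, for illustration only (the kernel itself uses ether_addr_to_u64()/u64_to_ether_addr() for the conversions):

#include <stdint.h>
#include <stdio.h>

static uint64_t mac_to_u64(const uint8_t m[6])
{
        uint64_t u = 0;

        for (int i = 0; i < 6; i++)
                u = (u << 8) | m[i];
        return u;
}

static void u64_to_mac(uint64_t u, uint8_t m[6])
{
        for (int i = 5; i >= 0; i--) {
                m[i] = u & 0xff;
                u >>= 8;
        }
}

int main(void)
{
        uint8_t base[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0xf0 };
        uint8_t port[6];

        /* Port 3 gets base + 3, as eth_hw_addr_gen(dev, base, 3) would. */
        u64_to_mac(mac_to_u64(base) + 3, port);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               port[0], port[1], port[2], port[3], port[4], port[5]);
        return 0;
}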
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 849524b55d89..845a0ffc16ee 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -94,6 +94,7 @@ struct ethtool_link_ext_state_info {
enum ethtool_link_ext_substate_link_logical_mismatch link_logical_mismatch;
enum ethtool_link_ext_substate_bad_signal_integrity bad_signal_integrity;
enum ethtool_link_ext_substate_cable_issue cable_issue;
+ enum ethtool_link_ext_substate_module module;
u8 __link_ext_substate;
};
};
@@ -416,6 +417,17 @@ struct ethtool_module_eeprom {
};
/**
+ * struct ethtool_module_power_mode_params - module power mode parameters
+ * @policy: The power mode policy enforced by the host for the plug-in module.
+ * @mode: The operational power mode of the plug-in module. Should be filled by
+ * device drivers on get operations.
+ */
+struct ethtool_module_power_mode_params {
+ enum ethtool_module_power_mode_policy policy;
+ enum ethtool_module_power_mode mode;
+};
+
+/**
* struct ethtool_ops - optional netdev operations
* @cap_link_lanes_supported: indicates if the driver supports lanes
* parameter.
@@ -580,6 +592,11 @@ struct ethtool_module_eeprom {
* @get_eth_ctrl_stats: Query some of the IEEE 802.3 MAC Ctrl statistics.
* @get_rmon_stats: Query some of the RMON (RFC 2819) statistics.
 * Set %ranges to a pointer to a zero-terminated array of byte ranges.
+ * @get_module_power_mode: Get the power mode policy for the plug-in module
+ * used by the network device and its operational power mode, if
+ * plugged-in.
+ * @set_module_power_mode: Set the power mode policy for the plug-in module
+ * used by the network device.
*
* All operations are optional (i.e. the function pointer may be set
* to %NULL) and callers must take this into account. Callers must
@@ -705,6 +722,12 @@ struct ethtool_ops {
void (*get_rmon_stats)(struct net_device *dev,
struct ethtool_rmon_stats *rmon_stats,
const struct ethtool_rmon_hist_range **ranges);
+ int (*get_module_power_mode)(struct net_device *dev,
+ struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack);
+ int (*set_module_power_mode)(struct net_device *dev,
+ const struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack);
};
int ethtool_check_ops(const struct ethtool_ops *ops);
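Editorial note: a hedged sketch of a driver wiring up the two new callbacks; the my_* names are hypothetical, and the enum values are assumed to match the uapi constants introduced alongside this series:

/* Hypothetical driver: report a fixed high-power policy. Real drivers
 * would query module EEPROM or firmware state. */
static int my_get_module_power_mode(struct net_device *dev,
                                    struct ethtool_module_power_mode_params *params,
                                    struct netlink_ext_ack *extack)
{
        params->policy = ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH;
        params->mode = ETHTOOL_MODULE_POWER_MODE_HIGH;
        return 0;
}

static const struct ethtool_ops my_ethtool_ops = {
        .get_module_power_mode = my_get_module_power_mode,
};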
diff --git a/include/linux/ethtool_netlink.h b/include/linux/ethtool_netlink.h
index 1e7bf78cb382..aba348d58ff6 100644
--- a/include/linux/ethtool_netlink.h
+++ b/include/linux/ethtool_netlink.h
@@ -10,6 +10,9 @@
#define __ETHTOOL_LINK_MODE_MASK_NWORDS \
DIV_ROUND_UP(__ETHTOOL_LINK_MODE_MASK_NBITS, 32)
+#define ETHTOOL_PAUSE_STAT_CNT (__ETHTOOL_A_PAUSE_STAT_CNT - \
+ ETHTOOL_A_PAUSE_STAT_TX_FRAMES)
+
enum ethtool_multicast_groups {
ETHNL_MCGRP_MONITOR,
};
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
index eec3b7c40811..616af2ea20f3 100644
--- a/include/linux/fanotify.h
+++ b/include/linux/fanotify.h
@@ -84,13 +84,20 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */
*/
#define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE)
+/* Events that can be reported with event->fd */
+#define FANOTIFY_FD_EVENTS (FANOTIFY_PATH_EVENTS | FANOTIFY_PERM_EVENTS)
+
/* Events that can only be reported with data type FSNOTIFY_EVENT_INODE */
#define FANOTIFY_INODE_EVENTS (FANOTIFY_DIRENT_EVENTS | \
FAN_ATTRIB | FAN_MOVE_SELF | FAN_DELETE_SELF)
+/* Events that can only be reported with data type FSNOTIFY_EVENT_ERROR */
+#define FANOTIFY_ERROR_EVENTS (FAN_FS_ERROR)
+
/* Events that user can request to be notified on */
#define FANOTIFY_EVENTS (FANOTIFY_PATH_EVENTS | \
- FANOTIFY_INODE_EVENTS)
+ FANOTIFY_INODE_EVENTS | \
+ FANOTIFY_ERROR_EVENTS)
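Editorial note: for illustration, a hedged userspace sketch of subscribing to the new error class; it assumes the matching FAN_FS_ERROR uapi bit (which requires FAN_REPORT_FID) and CAP_SYS_ADMIN:

#include <fcntl.h>
#include <stdio.h>
#include <sys/fanotify.h>

int main(void)
{
        int fd = fanotify_init(FAN_CLASS_NOTIF | FAN_REPORT_FID, O_RDONLY);

        if (fd < 0) {
                perror("fanotify_init");
                return 1;
        }
        if (fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_FILESYSTEM,
                          FAN_FS_ERROR, AT_FDCWD, "/mnt") < 0) {
                perror("fanotify_mark");
                return 1;
        }
        /* read(fd, ...) now yields FAN_FS_ERROR records. */
        return 0;
}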
/* Events that require a permission response from user */
#define FANOTIFY_PERM_EVENTS (FAN_OPEN_PERM | FAN_ACCESS_PERM | \
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 5950f8f5dc74..6f3db99ab990 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -262,7 +262,7 @@ struct fb_ops {
/* Draws a rectangle */
void (*fb_fillrect) (struct fb_info *info, const struct fb_fillrect *rect);
- /* Copy data from area to another */
+ /* Copy data from area to another. Obsolete. */
void (*fb_copyarea) (struct fb_info *info, const struct fb_copyarea *region);
	/* Draws an image to the display */
void (*fb_imageblit) (struct fb_info *info, const struct fb_image *image);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 4a93c12543ee..24b7ed2677af 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -360,10 +360,9 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
.off = 0, \
.imm = TGT })
-/* Function call */
-#define BPF_CAST_CALL(x) \
-	((u64 (*)(u64, u64, u64, u64, u64))(x))
+/* Convert function address to BPF immediate */
+#define BPF_CALL_IMM(x)	((void *)(x) - (void *)__bpf_call_base)
#define BPF_EMIT_CALL(FUNC) \
((struct bpf_insn) { \
@@ -371,7 +370,7 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
.dst_reg = 0, \
.src_reg = 0, \
.off = 0, \
- .imm = ((FUNC) - __bpf_call_base) })
+ .imm = BPF_CALL_IMM(FUNC) })
/* Raw code statement block */
@@ -554,9 +553,9 @@ struct bpf_binary_header {
};
struct bpf_prog_stats {
- u64 cnt;
- u64 nsecs;
- u64 misses;
+ u64_stats_t cnt;
+ u64_stats_t nsecs;
+ u64_stats_t misses;
struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));
@@ -586,8 +585,10 @@ struct bpf_prog {
struct bpf_prog_aux *aux; /* Auxiliary fields */
struct sock_fprog_kern *orig_prog; /* Original BPF program */
/* Instructions for interpreter */
- struct sock_filter insns[0];
- struct bpf_insn insnsi[];
+ union {
+ DECLARE_FLEX_ARRAY(struct sock_filter, insns);
+ DECLARE_FLEX_ARRAY(struct bpf_insn, insnsi);
+ };
};
struct sk_filter {
@@ -613,13 +614,14 @@ static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
if (static_branch_unlikely(&bpf_stats_enabled_key)) {
struct bpf_prog_stats *stats;
u64 start = sched_clock();
+ unsigned long flags;
ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
stats = this_cpu_ptr(prog->stats);
- u64_stats_update_begin(&stats->syncp);
- stats->cnt++;
- stats->nsecs += sched_clock() - start;
- u64_stats_update_end(&stats->syncp);
+ flags = u64_stats_update_begin_irqsave(&stats->syncp);
+ u64_stats_inc(&stats->cnt);
+ u64_stats_add(&stats->nsecs, sched_clock() - start);
+ u64_stats_update_end_irqrestore(&stats->syncp, flags);
} else {
ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
}
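Editorial note: the move to u64_stats_t means readers must go through the paired fetch API rather than loading the fields directly. A hedged reader-side sketch, kernel context assumed; read_prog_run_cnt is illustrative:

/* Illustrative: snapshot one CPU's run counter consistently, including
 * on 32-bit where the seqcount in syncp actually does work. */
static u64 read_prog_run_cnt(const struct bpf_prog *prog, int cpu)
{
        const struct bpf_prog_stats *st = per_cpu_ptr(prog->stats, cpu);
        unsigned int start;
        u64 cnt;

        do {
                start = u64_stats_fetch_begin(&st->syncp);
                cnt = u64_stats_read(&st->cnt);
        } while (u64_stats_fetch_retry(&st->syncp, start));

        return cnt;
}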
@@ -1051,6 +1053,7 @@ extern int bpf_jit_enable;
extern int bpf_jit_harden;
extern int bpf_jit_kallsyms;
extern long bpf_jit_limit;
+extern long bpf_jit_limit_max;
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index aec8f30ab200..07967a450eaa 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -436,6 +436,12 @@ typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
void *header, void *data);
typedef void (*fw_iso_mc_callback_t)(struct fw_iso_context *context,
dma_addr_t completed, void *data);
+
+union fw_iso_callback {
+ fw_iso_callback_t sc;
+ fw_iso_mc_callback_t mc;
+};
+
struct fw_iso_context {
struct fw_card *card;
int type;
@@ -443,10 +449,7 @@ struct fw_iso_context {
int speed;
bool drop_overflow_headers;
size_t header_size;
- union {
- fw_iso_callback_t sc;
- fw_iso_mc_callback_t mc;
- } callback;
+ union fw_iso_callback callback;
void *callback_data;
};
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index 25109192cebe..3b057dfc8284 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -20,23 +20,19 @@ struct firmware {
struct module;
struct device;
-struct builtin_fw {
- char *name;
- void *data;
- unsigned long size;
-};
-
-/* We have to play tricks here much like stringify() to get the
- __COUNTER__ macro to be expanded as we want it */
-#define __fw_concat1(x, y) x##y
-#define __fw_concat(x, y) __fw_concat1(x, y)
-
-#define DECLARE_BUILTIN_FIRMWARE(name, blob) \
- DECLARE_BUILTIN_FIRMWARE_SIZE(name, &(blob), sizeof(blob))
-
-#define DECLARE_BUILTIN_FIRMWARE_SIZE(name, blob, size) \
- static const struct builtin_fw __fw_concat(__builtin_fw,__COUNTER__) \
- __used __section(".builtin_fw") = { name, blob, size }
+/*
+ * Built-in firmware functionality is only available with FW_LOADER=y, not
+ * with FW_LOADER=m
+ */
+#ifdef CONFIG_FW_LOADER
+bool firmware_request_builtin(struct firmware *fw, const char *name);
+#else
+static inline bool firmware_request_builtin(struct firmware *fw,
+ const char *name)
+{
+ return false;
+}
+#endif
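Editorial note: a hedged sketch of the intended call pattern, trying the kernel-embedded blob before falling back to the regular loader; load_fw and "my-device.bin" are illustrative:

static int load_fw(struct device *dev, const struct firmware **out)
{
        static struct firmware builtin;

        /* Only finds anything with FW_LOADER=y; fills data/size. */
        if (firmware_request_builtin(&builtin, "my-device.bin")) {
                *out = &builtin;
                return 0;
        }
        return request_firmware(out, "my-device.bin", dev);
}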
#if defined(CONFIG_FW_LOADER) || (defined(CONFIG_FW_LOADER_MODULE) && defined(MODULE))
int request_firmware(const struct firmware **fw, const char *name,
diff --git a/include/linux/firmware/imx/s4.h b/include/linux/firmware/imx/s4.h
new file mode 100644
index 000000000000..9e34923ae1d6
--- /dev/null
+++ b/include/linux/firmware/imx/s4.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2021 NXP
+ *
+ * Header file for the IPC implementation.
+ */
+
+#ifndef _S4_IPC_H
+#define _S4_IPC_H
+
+struct imx_s4_ipc;
+
+struct imx_s4_rpc_msg {
+ uint8_t ver;
+ uint8_t size;
+ uint8_t cmd;
+ uint8_t tag;
+} __packed;
+
+#endif /* _S4_IPC_H */
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index 56b426fe020c..47fd4e52a423 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -72,6 +72,8 @@ enum pm_api_id {
PM_SET_REQUIREMENT = 15,
PM_RESET_ASSERT = 17,
PM_RESET_GET_STATUS = 18,
+ PM_MMIO_WRITE = 19,
+ PM_MMIO_READ = 20,
PM_PM_INIT_FINALIZE = 21,
PM_FPGA_LOAD = 22,
PM_FPGA_GET_STATUS = 23,
@@ -123,6 +125,7 @@ enum pm_ioctl_id {
IOCTL_READ_PGGS = 15,
/* Set healthy bit value */
IOCTL_SET_BOOT_HEALTH_STATUS = 17,
+ IOCTL_OSPI_MUX_SELECT = 21,
};
enum pm_query_id {
@@ -351,6 +354,11 @@ enum zynqmp_pm_shutdown_subtype {
ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM = 2,
};
+enum ospi_mux_select_type {
+ PM_OSPI_MUX_SEL_DMA = 0,
+ PM_OSPI_MUX_SEL_LINEAR = 1,
+};
+
/**
* struct zynqmp_pm_query_data - PM query data
* @qid: query ID
@@ -387,9 +395,12 @@ int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data);
int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data);
int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value);
int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type);
+int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select);
int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
const enum zynqmp_pm_reset_action assert_flag);
int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status);
+unsigned int zynqmp_pm_bootmode_read(u32 *ps_mode);
+int zynqmp_pm_bootmode_write(u32 ps_mode);
int zynqmp_pm_init_finalize(void);
int zynqmp_pm_set_suspend_mode(u32 mode);
int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
@@ -508,6 +519,11 @@ static inline int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
return -ENODEV;
}
+static inline int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select)
+{
+ return -ENODEV;
+}
+
static inline int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
const enum zynqmp_pm_reset_action assert_flag)
{
@@ -520,6 +536,16 @@ static inline int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
return -ENODEV;
}
+static inline unsigned int zynqmp_pm_bootmode_read(u32 *ps_mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_bootmode_write(u32 ps_mode)
+{
+ return -ENODEV;
+}
+
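Editorial note: a hedged sketch of a flash controller flipping the OSPI mux into linear (memory-mapped) mode via the new call; my_ospi_enter_linear and dev_id are illustrative:

/* Illustrative: callers see -ENODEV when PM firmware support is absent,
 * matching the stub above. */
static int my_ospi_enter_linear(u32 dev_id)
{
        return zynqmp_pm_ospi_mux_select(dev_id, PM_OSPI_MUX_SEL_LINEAR);
}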
static inline int zynqmp_pm_init_finalize(void)
{
return -ENODEV;
diff --git a/include/linux/flex_proportions.h b/include/linux/flex_proportions.h
index c12df59d3f5f..3e378b1fb0bc 100644
--- a/include/linux/flex_proportions.h
+++ b/include/linux/flex_proportions.h
@@ -83,9 +83,10 @@ struct fprop_local_percpu {
int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp);
void fprop_local_destroy_percpu(struct fprop_local_percpu *pl);
-void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl);
-void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl,
- int max_frac);
+void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
+ long nr);
+void __fprop_add_percpu_max(struct fprop_global *p,
+ struct fprop_local_percpu *pl, int max_frac, long nr);
void fprop_fraction_percpu(struct fprop_global *p,
struct fprop_local_percpu *pl, unsigned long *numerator,
unsigned long *denominator);
@@ -96,7 +97,7 @@ void fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
unsigned long flags;
local_irq_save(flags);
- __fprop_inc_percpu(p, pl);
+ __fprop_add_percpu(p, pl, 1);
local_irq_restore(flags);
}
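Editorial note: a batched wrapper follows the same IRQ-save pattern as fprop_inc_percpu() above, just passing nr through. A minimal sketch; the helper name is hypothetical:

/* Illustrative: account nr events at once. */
static inline void fprop_add_percpu(struct fprop_global *p,
                                    struct fprop_local_percpu *pl, long nr)
{
        unsigned long flags;

        local_irq_save(flags);
        __fprop_add_percpu(p, pl, nr);
        local_irq_restore(flags);
}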
diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
index c1be37437e77..a6cd6815f249 100644
--- a/include/linux/fortify-string.h
+++ b/include/linux/fortify-string.h
@@ -2,6 +2,27 @@
#ifndef _LINUX_FORTIFY_STRING_H_
#define _LINUX_FORTIFY_STRING_H_
+#define __FORTIFY_INLINE extern __always_inline __attribute__((gnu_inline))
+#define __RENAME(x) __asm__(#x)
+
+void fortify_panic(const char *name) __noreturn __cold;
+void __read_overflow(void) __compiletime_error("detected read beyond size of object (1st parameter)");
+void __read_overflow2(void) __compiletime_error("detected read beyond size of object (2nd parameter)");
+void __write_overflow(void) __compiletime_error("detected write beyond size of object (1st parameter)");
+
+#define __compiletime_strlen(p) \
+({ \
+ unsigned char *__p = (unsigned char *)(p); \
+ size_t __ret = (size_t)-1; \
+ size_t __p_size = __builtin_object_size(p, 1); \
+ if (__p_size != (size_t)-1) { \
+ size_t __p_len = __p_size - 1; \
+ if (__builtin_constant_p(__p[__p_len]) && \
+ __p[__p_len] == '\0') \
+ __ret = __builtin_strlen(__p); \
+ } \
+ __ret; \
+})
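Editorial note: the macro only folds to a real length when the compiler can bound the object and prove the final byte is NUL. A standalone userspace sketch of the same __builtin_object_size() behaviour, for illustration:

#include <stdio.h>

int main(void)
{
        char buf[8] = "hi";

        /* Mode 1 = closest surrounding object: typically 8 here, and
         * (size_t)-1 when the compiler cannot bound the pointer. */
        printf("bos: %zu\n", __builtin_object_size(buf, 1));
        /* Folds to 2 at compile time for a string literal. */
        printf("len: %zu\n", __builtin_strlen("hi"));
        return 0;
}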
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr);
@@ -49,28 +70,38 @@ __FORTIFY_INLINE char *strcat(char *p, const char *q)
return p;
}
-__FORTIFY_INLINE __kernel_size_t strlen(const char *p)
+extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
+__FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen)
{
- __kernel_size_t ret;
size_t p_size = __builtin_object_size(p, 1);
+ size_t p_len = __compiletime_strlen(p);
+ size_t ret;
- /* Work around gcc excess stack consumption issue */
- if (p_size == (size_t)-1 ||
- (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0'))
- return __underlying_strlen(p);
- ret = strnlen(p, p_size);
- if (p_size <= ret)
+ /* We can take compile-time actions when maxlen is const. */
+ if (__builtin_constant_p(maxlen) && p_len != (size_t)-1) {
+ /* If p is const, we can use its compile-time-known len. */
+ if (maxlen >= p_size)
+ return p_len;
+ }
+
+ /* Do not check characters beyond the end of p. */
+ ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
+ if (p_size <= ret && maxlen != ret)
fortify_panic(__func__);
return ret;
}
-extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
-__FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen)
+/* defined after fortified strnlen to reuse it. */
+__FORTIFY_INLINE __kernel_size_t strlen(const char *p)
{
+ __kernel_size_t ret;
size_t p_size = __builtin_object_size(p, 1);
- __kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
- if (p_size <= ret && maxlen != ret)
+ /* Give up if we don't know how large p is. */
+ if (p_size == (size_t)-1)
+ return __underlying_strlen(p);
+ ret = strnlen(p, p_size);
+ if (p_size <= ret)
fortify_panic(__func__);
return ret;
}
@@ -79,24 +110,27 @@ __FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen)
extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy);
__FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
{
- size_t ret;
size_t p_size = __builtin_object_size(p, 1);
size_t q_size = __builtin_object_size(q, 1);
+ size_t q_len; /* Full count of source string length. */
+ size_t len; /* Count of characters going into destination. */
if (p_size == (size_t)-1 && q_size == (size_t)-1)
return __real_strlcpy(p, q, size);
- ret = strlen(q);
- if (size) {
- size_t len = (ret >= size) ? size - 1 : ret;
-
- if (__builtin_constant_p(len) && len >= p_size)
+ q_len = strlen(q);
+ len = (q_len >= size) ? size - 1 : q_len;
+ if (__builtin_constant_p(size) && __builtin_constant_p(q_len) && size) {
+ /* Write size is always larger than destination. */
+ if (len >= p_size)
__write_overflow();
+ }
+ if (size) {
if (len >= p_size)
fortify_panic(__func__);
__underlying_memcpy(p, q, len);
p[len] = '\0';
}
- return ret;
+ return q_len;
}
/* defined after fortified strnlen to reuse it */
@@ -280,7 +314,10 @@ __FORTIFY_INLINE char *strcpy(char *p, const char *q)
if (p_size == (size_t)-1 && q_size == (size_t)-1)
return __underlying_strcpy(p, q);
size = strlen(q) + 1;
- /* test here to use the more stringent object size */
+ /* Compile-time check for const size overflow. */
+ if (__builtin_constant_p(size) && p_size < size)
+ __write_overflow();
+ /* Run-time check for dynamic size overflow. */
if (p_size < size)
fortify_panic(__func__);
memcpy(p, q, size);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e7a633353fd2..1cb616fc1105 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -48,6 +48,7 @@
struct backing_dev_info;
struct bdi_writeback;
struct bio;
+struct io_comp_batch;
struct export_operations;
struct fiemap_extent_info;
struct hd_geometry;
@@ -329,16 +330,12 @@ struct kiocb {
randomized_struct_fields_start
loff_t ki_pos;
- void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
+ void (*ki_complete)(struct kiocb *iocb, long ret);
void *private;
int ki_flags;
u16 ki_hint;
u16 ki_ioprio; /* See linux/ioprio.h */
- union {
- unsigned int ki_cookie; /* for ->iopoll */
- struct wait_page_queue *ki_waitq; /* for async buffered IO */
- };
-
+ struct wait_page_queue *ki_waitq; /* for async buffered IO */
randomized_struct_fields_end
};
@@ -1443,6 +1440,7 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_I_UNTRUSTED_MOUNTER 0x00000040
#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */
+#define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */
/* Possible states of 'frozen' field */
enum {
@@ -2075,7 +2073,8 @@ struct file_operations {
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
- int (*iopoll)(struct kiocb *kiocb, bool spin);
+ int (*iopoll)(struct kiocb *kiocb, struct io_comp_batch *,
+ unsigned int flags);
int (*iterate) (struct file *, struct dir_context *);
int (*iterate_shared) (struct file *, struct dir_context *);
__poll_t (*poll) (struct file *, struct poll_table_struct *);
@@ -2498,6 +2497,8 @@ enum file_time_flags {
extern bool atime_needs_update(const struct path *, struct inode *);
extern void touch_atime(const struct path *);
+int inode_update_time(struct inode *inode, struct timespec64 *time, int flags);
+
static inline void file_accessed(struct file *file)
{
if (!(file->f_flags & O_NOATIME))
@@ -3192,6 +3193,7 @@ static inline void remove_inode_hash(struct inode *inode)
}
extern void inode_sb_list_add(struct inode *inode);
+extern void inode_add_lru(struct inode *inode);
extern int sb_set_blocksize(struct super_block *, int);
extern int sb_min_blocksize(struct super_block *, int);
@@ -3383,6 +3385,8 @@ extern int simple_open(struct inode *inode, struct file *file);
extern int simple_link(struct dentry *, struct inode *, struct dentry *);
extern int simple_unlink(struct inode *, struct dentry *);
extern int simple_rmdir(struct inode *, struct dentry *);
+extern int simple_rename_exchange(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry);
extern int simple_rename(struct user_namespace *, struct inode *,
struct dentry *, struct inode *, struct dentry *,
unsigned int);
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index a4dab5998613..3b2282c157f7 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -167,7 +167,7 @@ struct fscache_cookie {
static inline bool fscache_cookie_enabled(struct fscache_cookie *cookie)
{
- return test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
+ return fscache_cookie_valid(cookie) && test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
}
/*
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index e912ed9141d9..91ea9477e9bd 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -118,9 +118,6 @@ struct fscrypt_operations {
*/
bool (*empty_dir)(struct inode *inode);
- /* The filesystem's maximum ciphertext filename length, in bytes */
- unsigned int max_namelen;
-
/*
* Check whether the filesystem's inode numbers and UUID are stable,
* meaning that they will never be changed even by offline operations
diff --git a/include/linux/fsi-occ.h b/include/linux/fsi-occ.h
index d4cdc2aa6e33..7ee3dbd7f4b3 100644
--- a/include/linux/fsi-occ.h
+++ b/include/linux/fsi-occ.h
@@ -19,6 +19,8 @@ struct device;
#define OCC_RESP_CRIT_OCB 0xE3
#define OCC_RESP_CRIT_HW 0xE4
+#define OCC_MAX_RESP_WORDS 2048
+
int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
void *response, size_t *resp_len);
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index 30ece3ae6df7..e026f6c48b49 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -620,6 +620,20 @@ int dpcon_reset(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token);
+int fsl_mc_obj_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int obj_id,
+ char *obj_type,
+ u16 *token);
+
+int fsl_mc_obj_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int fsl_mc_obj_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
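Editorial note: the three generic helpers replace per-object open/close/reset triplets. A hedged sketch; reset_mc_object, the "dpni" type string and the zero cmd_flags are illustrative:

/* Illustrative: open an object by id/type, reset it, then close. */
static int reset_mc_object(struct fsl_mc_io *mc_io, int obj_id)
{
        u16 token;
        int err;

        err = fsl_mc_obj_open(mc_io, 0, obj_id, "dpni", &token);
        if (err)
                return err;
        err = fsl_mc_obj_reset(mc_io, 0, token);
        fsl_mc_obj_close(mc_io, 0, token);
        return err;
}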
/**
* struct dpcon_attr - Structure representing DPCON attributes
* @id: DPCON object ID
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 12d3a7d308ab..787545e87eeb 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -26,20 +26,20 @@
* FS_EVENT_ON_CHILD mask on the parent inode and will not be reported if only
* the child is interested and not the parent.
*/
-static inline void fsnotify_name(struct inode *dir, __u32 mask,
- struct inode *child,
- const struct qstr *name, u32 cookie)
+static inline int fsnotify_name(__u32 mask, const void *data, int data_type,
+ struct inode *dir, const struct qstr *name,
+ u32 cookie)
{
if (atomic_long_read(&dir->i_sb->s_fsnotify_connectors) == 0)
- return;
+ return 0;
- fsnotify(mask, child, FSNOTIFY_EVENT_INODE, dir, name, NULL, cookie);
+ return fsnotify(mask, data, data_type, dir, name, NULL, cookie);
}
static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry,
__u32 mask)
{
- fsnotify_name(dir, mask, d_inode(dentry), &dentry->d_name, 0);
+ fsnotify_name(mask, dentry, FSNOTIFY_EVENT_DENTRY, dir, &dentry->d_name, 0);
}
static inline void fsnotify_inode(struct inode *inode, __u32 mask)
@@ -86,7 +86,7 @@ notify_child:
*/
static inline void fsnotify_dentry(struct dentry *dentry, __u32 mask)
{
- fsnotify_parent(dentry, mask, d_inode(dentry), FSNOTIFY_EVENT_INODE);
+ fsnotify_parent(dentry, mask, dentry, FSNOTIFY_EVENT_DENTRY);
}
static inline int fsnotify_file(struct file *file, __u32 mask)
@@ -154,8 +154,10 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
new_dir_mask |= FS_ISDIR;
}
- fsnotify_name(old_dir, old_dir_mask, source, old_name, fs_cookie);
- fsnotify_name(new_dir, new_dir_mask, source, new_name, fs_cookie);
+ fsnotify_name(old_dir_mask, source, FSNOTIFY_EVENT_INODE,
+ old_dir, old_name, fs_cookie);
+ fsnotify_name(new_dir_mask, source, FSNOTIFY_EVENT_INODE,
+ new_dir, new_name, fs_cookie);
if (target)
fsnotify_link_count(target);
@@ -190,16 +192,22 @@ static inline void fsnotify_inoderemove(struct inode *inode)
/*
* fsnotify_create - 'name' was linked in
+ *
+ * Caller must make sure that dentry->d_name is stable.
+ * Note: some filesystems (e.g. kernfs) leave @dentry negative and instantiate
+ * ->d_inode later
*/
-static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
+static inline void fsnotify_create(struct inode *dir, struct dentry *dentry)
{
- audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE);
+ audit_inode_child(dir, dentry, AUDIT_TYPE_CHILD_CREATE);
- fsnotify_dirent(inode, dentry, FS_CREATE);
+ fsnotify_dirent(dir, dentry, FS_CREATE);
}
/*
* fsnotify_link - new hardlink in 'inode' directory
+ *
+ * Caller must make sure that new_dentry->d_name is stable.
* Note: We have to pass also the linked inode ptr as some filesystems leave
* new_dentry->d_inode NULL and instantiate inode pointer later
*/
@@ -209,7 +217,8 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode,
fsnotify_link_count(inode);
audit_inode_child(dir, new_dentry, AUDIT_TYPE_CHILD_CREATE);
- fsnotify_name(dir, FS_CREATE, inode, &new_dentry->d_name, 0);
+ fsnotify_name(FS_CREATE, inode, FSNOTIFY_EVENT_INODE,
+ dir, &new_dentry->d_name, 0);
}
/*
@@ -227,12 +236,16 @@ static inline void fsnotify_unlink(struct inode *dir, struct dentry *dentry)
/*
* fsnotify_mkdir - directory 'name' was created
+ *
+ * Caller must make sure that dentry->d_name is stable.
+ * Note: some filesystems (e.g. kernfs) leave @dentry negative and instantiate
+ * ->d_inode later
*/
-static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
+static inline void fsnotify_mkdir(struct inode *dir, struct dentry *dentry)
{
- audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE);
+ audit_inode_child(dir, dentry, AUDIT_TYPE_CHILD_CREATE);
- fsnotify_dirent(inode, dentry, FS_CREATE | FS_ISDIR);
+ fsnotify_dirent(dir, dentry, FS_CREATE | FS_ISDIR);
}
/*
@@ -326,4 +339,17 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
fsnotify_dentry(dentry, mask);
}
+static inline int fsnotify_sb_error(struct super_block *sb, struct inode *inode,
+ int error)
+{
+ struct fs_error_report report = {
+ .error = error,
+ .inode = inode,
+ .sb = sb,
+ };
+
+ return fsnotify(FS_ERROR, &report, FSNOTIFY_EVENT_ERROR,
+ NULL, NULL, NULL, 0);
+}
+
#endif /* _LINUX_FS_NOTIFY_H */
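Editorial note: a hedged sketch of the producer side, as a filesystem's error path might invoke it; my_fs_abort is hypothetical, and passing a NULL inode is assumed valid for errors that are not inode-specific:

/* Illustrative: surface a metadata failure to FS_ERROR listeners. */
static void my_fs_abort(struct super_block *sb, struct inode *inode, int err)
{
        fsnotify_sb_error(sb, inode, err);
}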
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 1ce66748a2d2..51ef2b079bfa 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -19,6 +19,7 @@
#include <linux/atomic.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
+#include <linux/mempool.h>
/*
 * IN_* from inotify.h lines up EXACTLY with FS_*; this is so we can easily
@@ -42,6 +43,12 @@
#define FS_UNMOUNT 0x00002000 /* inode on umount fs */
#define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
+#define FS_ERROR 0x00008000 /* Filesystem Error (fanotify) */
+
+/*
+ * FS_IN_IGNORED overloads FS_ERROR. It is only used internally by inotify
+ * which does not support FS_ERROR.
+ */
#define FS_IN_IGNORED 0x00008000 /* last inotify event here */
#define FS_OPEN_PERM		0x00010000	/* open event in a permission hook */
@@ -95,7 +102,8 @@
#define ALL_FSNOTIFY_EVENTS (ALL_FSNOTIFY_DIRENT_EVENTS | \
FS_EVENTS_POSS_ON_CHILD | \
FS_DELETE_SELF | FS_MOVE_SELF | FS_DN_RENAME | \
- FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED)
+ FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \
+ FS_ERROR)
/* Extra flags that may be reported with event or control handling of events */
#define ALL_FSNOTIFY_FLAGS (FS_EXCL_UNLINK | FS_ISDIR | FS_IN_ONESHOT | \
@@ -136,6 +144,7 @@ struct mem_cgroup;
* @dir: optional directory associated with event -
* if @file_name is not NULL, this is the directory that
* @file_name is relative to.
+ * Either @inode or @dir must be non-NULL.
* @file_name: optional file name associated with event
* @cookie: inotify rename cookie
*
@@ -155,7 +164,7 @@ struct fsnotify_ops {
const struct qstr *file_name, u32 cookie);
void (*free_group_priv)(struct fsnotify_group *group);
void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
- void (*free_event)(struct fsnotify_event *event);
+ void (*free_event)(struct fsnotify_group *group, struct fsnotify_event *event);
/* called on final put+free to free memory */
void (*free_mark)(struct fsnotify_mark *mark);
};
@@ -238,6 +247,7 @@ struct fsnotify_group {
int flags; /* flags from fanotify_init() */
int f_flags; /* event_f_flags from fanotify_init() */
struct ucounts *ucounts;
+ mempool_t error_events_pool;
} fanotify_data;
#endif /* CONFIG_FANOTIFY */
};
@@ -248,6 +258,14 @@ enum fsnotify_data_type {
FSNOTIFY_EVENT_NONE,
FSNOTIFY_EVENT_PATH,
FSNOTIFY_EVENT_INODE,
+ FSNOTIFY_EVENT_DENTRY,
+ FSNOTIFY_EVENT_ERROR,
+};
+
+struct fs_error_report {
+ int error;
+ struct inode *inode;
+ struct super_block *sb;
};
static inline struct inode *fsnotify_data_inode(const void *data, int data_type)
@@ -255,8 +273,25 @@ static inline struct inode *fsnotify_data_inode(const void *data, int data_type)
switch (data_type) {
case FSNOTIFY_EVENT_INODE:
return (struct inode *)data;
+ case FSNOTIFY_EVENT_DENTRY:
+ return d_inode(data);
case FSNOTIFY_EVENT_PATH:
return d_inode(((const struct path *)data)->dentry);
+ case FSNOTIFY_EVENT_ERROR:
+ return ((struct fs_error_report *)data)->inode;
+ default:
+ return NULL;
+ }
+}
+
+static inline struct dentry *fsnotify_data_dentry(const void *data, int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_DENTRY:
+ /* Non const is needed for dget() */
+ return (struct dentry *)data;
+ case FSNOTIFY_EVENT_PATH:
+ return ((const struct path *)data)->dentry;
default:
return NULL;
}
@@ -273,6 +308,35 @@ static inline const struct path *fsnotify_data_path(const void *data,
}
}
+static inline struct super_block *fsnotify_data_sb(const void *data,
+ int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_INODE:
+ return ((struct inode *)data)->i_sb;
+ case FSNOTIFY_EVENT_DENTRY:
+ return ((struct dentry *)data)->d_sb;
+ case FSNOTIFY_EVENT_PATH:
+ return ((const struct path *)data)->dentry->d_sb;
+ case FSNOTIFY_EVENT_ERROR:
+ return ((struct fs_error_report *) data)->sb;
+ default:
+ return NULL;
+ }
+}
+
+static inline struct fs_error_report *fsnotify_data_error_report(
+ const void *data,
+ int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_ERROR:
+ return (struct fs_error_report *) data;
+ default:
+ return NULL;
+ }
+}
+
enum fsnotify_obj_type {
FSNOTIFY_OBJ_TYPE_INODE,
FSNOTIFY_OBJ_TYPE_PARENT,
@@ -482,16 +546,30 @@ extern int fsnotify_fasync(int fd, struct file *file, int on);
extern void fsnotify_destroy_event(struct fsnotify_group *group,
struct fsnotify_event *event);
/* attach the event to the group notification queue */
-extern int fsnotify_add_event(struct fsnotify_group *group,
- struct fsnotify_event *event,
- int (*merge)(struct fsnotify_group *,
- struct fsnotify_event *),
- void (*insert)(struct fsnotify_group *,
- struct fsnotify_event *));
+extern int fsnotify_insert_event(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ int (*merge)(struct fsnotify_group *,
+ struct fsnotify_event *),
+ void (*insert)(struct fsnotify_group *,
+ struct fsnotify_event *));
+
+static inline int fsnotify_add_event(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ int (*merge)(struct fsnotify_group *,
+ struct fsnotify_event *))
+{
+ return fsnotify_insert_event(group, event, merge, NULL);
+}
+
/* Queue overflow event to a notification group */
static inline void fsnotify_queue_overflow(struct fsnotify_group *group)
{
- fsnotify_add_event(group, group->overflow_event, NULL, NULL);
+ fsnotify_add_event(group, group->overflow_event, NULL);
+}
+
+static inline bool fsnotify_is_overflow_event(u32 mask)
+{
+ return mask & FS_Q_OVERFLOW;
}
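Editorial note: on the consumer side, a backend recovers the report from the opaque data pointer with the new accessor. A hedged sketch; log_fs_error is hypothetical:

/* Illustrative: called from a handle_event()-style path. */
static void log_fs_error(const void *data, int data_type)
{
        struct fs_error_report *report =
                fsnotify_data_error_report(data, data_type);

        if (report)
                pr_warn("FS_ERROR %d on %s\n",
                        report->error, report->sb->s_id);
}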
static inline bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 832e65f06754..9999e29187de 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -30,16 +30,26 @@
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif
+#ifdef CONFIG_FUNCTION_TRACER
+struct ftrace_ops;
+struct ftrace_regs;
/*
* If the arch's mcount caller does not support all of ftrace's
* features, then it must call an indirect function that
* does. Or at least does enough to prevent any unwelcome side effects.
+ *
+ * Also define the function prototype that these architectures use
+ * to call the ftrace_ops_list_func().
*/
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
+void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
#else
# define FTRACE_FORCE_LIST_FUNC 0
+void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct ftrace_regs *fregs);
#endif
+#endif /* CONFIG_FUNCTION_TRACER */
/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
@@ -88,8 +98,6 @@ extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
-struct ftrace_ops;
-
#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
struct ftrace_regs {
@@ -316,7 +324,12 @@ int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
unsigned long old_addr,
unsigned long new_addr);
unsigned long ftrace_find_rec_direct(unsigned long ip);
+int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr);
+int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr);
+int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr);
+
#else
+struct ftrace_ops;
# define ftrace_direct_func_count 0
static inline int register_ftrace_direct(unsigned long ip, unsigned long addr)
{
@@ -346,6 +359,18 @@ static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
{
return 0;
}
+static inline int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
+{
+ return -ENODEV;
+}
+static inline int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
+{
+ return -ENODEV;
+}
+static inline int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
+{
+ return -ENODEV;
+}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
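Editorial note: a hedged sketch of the new *_multi API attaching one trampoline to a filtered set of functions, modeled on the in-tree samples; my_tramp is an arch-specific assembly trampoline assumed to exist elsewhere:

static struct ftrace_ops direct_ops;

static int attach_direct_tramp(unsigned long target_ip)
{
        int ret;

        ret = ftrace_set_filter_ip(&direct_ops, target_ip, 0, 0);
        if (ret)
                return ret;
        return register_ftrace_direct_multi(&direct_ops,
                                            (unsigned long)my_tramp);
}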
#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
@@ -795,6 +820,15 @@ static inline bool is_ftrace_trampoline(unsigned long addr)
}
#endif /* CONFIG_DYNAMIC_FTRACE */
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifndef ftrace_graph_func
+#define ftrace_graph_func ftrace_stub
+#define FTRACE_OPS_GRAPH_STUB FTRACE_OPS_FL_STUB
+#else
+#define FTRACE_OPS_GRAPH_STUB 0
+#endif
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
/* totally disable ftrace - can not re-enable after this */
void ftrace_kill(void);
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 9f4ad719bfe3..3a532ba66f6c 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/list.h>
+#include <linux/bits.h>
#include <linux/err.h>
struct fwnode_operations;
diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h
index bfd00320c7f3..107613f7d792 100644
--- a/include/linux/generic-radix-tree.h
+++ b/include/linux/generic-radix-tree.h
@@ -38,8 +38,9 @@
#include <asm/page.h>
#include <linux/bug.h>
-#include <linux/kernel.h>
#include <linux/log2.h>
+#include <linux/math.h>
+#include <linux/types.h>
struct genradix_root;
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 0f5315c2b5a3..74c410263113 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -12,12 +12,10 @@
#include <linux/types.h>
#include <linux/kdev_t.h>
-#include <linux/rcupdate.h>
-#include <linux/slab.h>
-#include <linux/percpu-refcount.h>
#include <linux/uuid.h>
#include <linux/blk_types.h>
-#include <asm/local.h>
+#include <linux/device.h>
+#include <linux/xarray.h>
extern const struct device_type disk_type;
extern struct device_type part_type;
@@ -26,14 +24,6 @@ extern struct class block_class;
#define DISK_MAX_PARTS 256
#define DISK_NAME_LEN 32
-#include <linux/major.h>
-#include <linux/device.h>
-#include <linux/smp.h>
-#include <linux/string.h>
-#include <linux/fs.h>
-#include <linux/workqueue.h>
-#include <linux/xarray.h>
-
#define PARTITION_META_INFO_VOLNAMELTH 64
/*
* Enough for the string representation of any kind of UUID plus NULL.
@@ -215,14 +205,16 @@ static inline dev_t disk_devt(struct gendisk *disk)
void disk_uevent(struct gendisk *disk, enum kobject_action action);
/* block/genhd.c */
-int device_add_disk(struct device *parent, struct gendisk *disk,
- const struct attribute_group **groups);
-static inline int add_disk(struct gendisk *disk)
+int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups);
+static inline int __must_check add_disk(struct gendisk *disk)
{
return device_add_disk(NULL, disk, NULL);
}
extern void del_gendisk(struct gendisk *gp);
+void invalidate_disk(struct gendisk *disk);
+
void set_disk_ro(struct gendisk *disk, bool read_only);
static inline int get_disk_ro(struct gendisk *disk)
@@ -231,6 +223,11 @@ static inline int get_disk_ro(struct gendisk *disk)
test_bit(GD_READ_ONLY, &disk->state);
}
+static inline int bdev_read_only(struct block_device *bdev)
+{
+ return bdev->bd_read_only || get_disk_ro(bdev->bd_disk);
+}
+
extern void disk_block_events(struct gendisk *disk);
extern void disk_unblock_events(struct gendisk *disk);
extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
@@ -248,7 +245,12 @@ static inline sector_t get_start_sect(struct block_device *bdev)
static inline sector_t bdev_nr_sectors(struct block_device *bdev)
{
- return i_size_read(bdev->bd_inode) >> 9;
+ return bdev->bd_nr_sectors;
+}
+
+static inline loff_t bdev_nr_bytes(struct block_device *bdev)
+{
+ return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
}
static inline sector_t get_capacity(struct gendisk *disk)
@@ -256,6 +258,12 @@ static inline sector_t get_capacity(struct gendisk *disk)
return bdev_nr_sectors(disk->part0);
}
+static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
+{
+ return bdev_nr_sectors(sb->s_bdev) >>
+ (sb->s_blocksize_bits - SECTOR_SHIFT);
+}
+
int bdev_disk_changed(struct gendisk *disk, bool invalidate);
void blk_drop_partitions(struct gendisk *disk);
@@ -291,10 +299,6 @@ bool bdev_check_media_change(struct block_device *bdev);
int __invalidate_device(struct block_device *bdev, bool kill_dirty);
void set_capacity(struct gendisk *disk, sector_t size);
-/* for drivers/char/raw.c: */
-int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
-long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
-
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
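Editorial note: with the sector count cached in the block_device, filesystems can size themselves through the new helpers instead of reading bd_inode. A hedged sketch; my_fs_check_size is hypothetical:

/* Illustrative: reject a device with too few filesystem blocks. */
static int my_fs_check_size(struct super_block *sb, u64 needed_blocks)
{
        if (sb_bdev_nr_blocks(sb) < needed_blocks)
                return -ENOSPC;

        pr_debug("device holds %lld bytes\n", bdev_nr_bytes(sb->s_bdev));
        return 0;
}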
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 55b2ec1f965a..b976c4177299 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -520,21 +520,21 @@ static inline void arch_free_page(struct page *page, int order) { }
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif
-#ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
-static inline int arch_make_page_accessible(struct page *page)
-{
- return 0;
-}
-#endif
struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
nodemask_t *nodemask);
+struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
+ nodemask_t *nodemask);
unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
nodemask_t *nodemask, int nr_pages,
struct list_head *page_list,
struct page **page_array);
+unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
+ unsigned long nr_pages,
+ struct page **page_array);
+
/* Bulk allocate order-0 pages */
static inline unsigned long
alloc_pages_bulk_list(gfp_t gfp, unsigned long nr_pages, struct list_head *list)
@@ -570,6 +570,15 @@ __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
return __alloc_pages(gfp_mask, order, nid, NULL);
}
+static inline
+struct folio *__folio_alloc_node(gfp_t gfp, unsigned int order, int nid)
+{
+ VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
+ VM_WARN_ON((gfp & __GFP_THISNODE) && !node_online(nid));
+
+ return __folio_alloc(gfp, order, nid, NULL);
+}
+
/*
* Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
* prefer the current CPU's closest node. Otherwise node must be valid and
@@ -586,6 +595,7 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
#ifdef CONFIG_NUMA
struct page *alloc_pages(gfp_t gfp, unsigned int order);
+struct folio *folio_alloc(gfp_t gfp, unsigned order);
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
struct vm_area_struct *vma, unsigned long addr,
int node, bool hugepage);
@@ -596,6 +606,10 @@ static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)
{
return alloc_pages_node(numa_node_id(), gfp_mask, order);
}
+static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order)
+{
+ return __folio_alloc_node(gfp, order, numa_node_id());
+}
#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
alloc_pages(gfp_mask, order)
#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
@@ -608,9 +622,9 @@ static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);
-void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
+void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __alloc_size(1);
void free_pages_exact(void *virt, size_t size);
-void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
+__meminit void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __alloc_size(1);
#define __get_free_page(gfp_mask) \
__get_free_pages((gfp_mask), 0)
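Editorial note: a hedged sketch of the new folio allocator in use; grab_buffer is hypothetical and the caller is assumed to drop the reference with folio_put():

/* Illustrative: a zeroed order-2 folio (four pages) near this CPU. */
static struct folio *grab_buffer(void)
{
        return folio_alloc(GFP_KERNEL | __GFP_ZERO, 2);
}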
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index a0f9901dcae6..a673a359e20b 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -168,11 +168,18 @@ struct gpio_irq_chip {
/**
* @parent_handler_data:
+ * @parent_handler_data_array:
*
* Data associated, and passed to, the handler for the parent
- * interrupt.
+ * interrupt. Can either be a single pointer if @per_parent_data
+ * is false, or an array of @num_parents pointers otherwise. If
+ * @per_parent_data is true, @parent_handler_data_array cannot be
+ * NULL.
*/
- void *parent_handler_data;
+ union {
+ void *parent_handler_data;
+ void **parent_handler_data_array;
+ };
/**
* @num_parents:
@@ -204,6 +211,14 @@ struct gpio_irq_chip {
bool threaded;
/**
+ * @per_parent_data:
+ *
+ * True if parent_handler_data_array describes a @num_parents
+ * sized array to be used as parent data.
+ */
+ bool per_parent_data;
+
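Editorial note: a hedged sketch of a probe path populating per-parent handler data under the new flag; my_gpio_wire_irqs and the bank pointers are illustrative:

/* Illustrative: one datum per parent interrupt, as required when
 * per_parent_data is set. */
static int my_gpio_wire_irqs(struct gpio_irq_chip *girq,
                             void *bank0, void *bank1)
{
        static void *bank_data[2];

        bank_data[0] = bank0;
        bank_data[1] = bank1;
        girq->num_parents = 2;
        girq->per_parent_data = true;
        girq->parent_handler_data_array = bank_data;
        return 0;
}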
+ /**
* @init_hw: optional routine to initialize hardware before
* an IRQ chip will be added. This is quite useful when
* a particular driver wants to clear IRQ related registers
diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
index 4aa1031d3e4c..0a0b2b09b1b8 100644
--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -73,6 +73,12 @@ static inline void *kmap_local_page(struct page *page)
return __kmap_local_page_prot(page, kmap_prot);
}
+static inline void *kmap_local_folio(struct folio *folio, size_t offset)
+{
+ struct page *page = folio_page(folio, offset / PAGE_SIZE);
+ return __kmap_local_page_prot(page, kmap_prot) + offset % PAGE_SIZE;
+}
+
static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
return __kmap_local_page_prot(page, prot);
@@ -171,6 +177,11 @@ static inline void *kmap_local_page(struct page *page)
return page_address(page);
}
+static inline void *kmap_local_folio(struct folio *folio, size_t offset)
+{
+ return page_address(&folio->page) + offset;
+}
+
static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
return kmap_local_page(page);
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index b4c49f9cc379..25aff0f2ed0b 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -97,6 +97,43 @@ static inline void kmap_flush_unused(void);
static inline void *kmap_local_page(struct page *page);
/**
+ * kmap_local_folio - Map a page in this folio for temporary usage
+ * @folio: The folio containing the page.
+ * @offset: The byte offset within the folio which identifies the page.
+ *
+ * Requires careful handling when nesting multiple mappings because the map
+ * management is stack based. The unmap has to be in the reverse order of
+ * the map operation::
+ *
+ * addr1 = kmap_local_folio(folio1, offset1);
+ * addr2 = kmap_local_folio(folio2, offset2);
+ * ...
+ * kunmap_local(addr2);
+ * kunmap_local(addr1);
+ *
+ * Unmapping addr1 before addr2 is invalid and causes malfunction.
+ *
+ * Contrary to kmap() mappings the mapping is only valid in the context of
+ * the caller and cannot be handed to other contexts.
+ *
+ * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
+ * virtual address of the direct mapping. Only real highmem pages are
+ * temporarily mapped.
+ *
+ * While it is significantly faster than kmap() for the highmem case it
+ * comes with restrictions about the pointer validity. Only use when really
+ * necessary.
+ *
+ * On HIGHMEM enabled systems mapping a highmem page has the side effect of
+ * disabling migration in order to keep the virtual address stable across
+ * preemption. No caller of kmap_local_folio() can rely on this side effect.
+ *
+ * Context: Can be invoked from any context.
+ * Return: The virtual address of @offset.
+ */
+static inline void *kmap_local_folio(struct folio *folio, size_t offset);
+
+/**
* kmap_atomic - Atomically map a page for temporary usage - Deprecated!
* @page: Pointer to the page to be mapped
*
@@ -143,9 +180,9 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
- void *addr = kmap_atomic(page);
+ void *addr = kmap_local_page(page);
clear_user_page(addr, vaddr, page);
- kunmap_atomic(addr);
+ kunmap_local(addr);
}
#endif
@@ -177,9 +214,9 @@ alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
static inline void clear_highpage(struct page *page)
{
- void *kaddr = kmap_atomic(page);
+ void *kaddr = kmap_local_page(page);
clear_page(kaddr);
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
}
#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
@@ -202,7 +239,7 @@ static inline void zero_user_segments(struct page *page,
unsigned start1, unsigned end1,
unsigned start2, unsigned end2)
{
- void *kaddr = kmap_atomic(page);
+ void *kaddr = kmap_local_page(page);
unsigned int i;
BUG_ON(end1 > page_size(page) || end2 > page_size(page));
@@ -213,7 +250,7 @@ static inline void zero_user_segments(struct page *page,
if (end2 > start2)
memset(kaddr + start2, 0, end2 - start2);
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
for (i = 0; i < compound_nr(page); i++)
flush_dcache_page(page + i);
}
@@ -238,11 +275,11 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
{
char *vfrom, *vto;
- vfrom = kmap_atomic(from);
- vto = kmap_atomic(to);
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
copy_user_page(vto, vfrom, vaddr, to);
- kunmap_atomic(vto);
- kunmap_atomic(vfrom);
+ kunmap_local(vto);
+ kunmap_local(vfrom);
}
#endif
@@ -253,11 +290,11 @@ static inline void copy_highpage(struct page *to, struct page *from)
{
char *vfrom, *vto;
- vfrom = kmap_atomic(from);
- vto = kmap_atomic(to);
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
copy_page(vto, vfrom);
- kunmap_atomic(vto);
- kunmap_atomic(vfrom);
+ kunmap_local(vto);
+ kunmap_local(vfrom);
}
#endif
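
As a usage illustration for kmap_local_folio(), copying a buffer out of the middle of a folio might look like the sketch below; only the single page containing the offset is mapped, so the copy must not cross a page boundary (folio, dst and len are assumed from the caller):

	/* map byte 100 of the folio's fourth page */
	char *addr = kmap_local_folio(folio, 3 * PAGE_SIZE + 100);

	memcpy(dst, addr, len);		/* len must stay within this page */
	kunmap_local(addr);
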
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index f123e15d966e..f280f33ff223 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -251,15 +251,6 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
}
/**
- * thp_head - Head page of a transparent huge page.
- * @page: Any page (tail, head or regular) found in the page cache.
- */
-static inline struct page *thp_head(struct page *page)
-{
- return compound_head(page);
-}
-
-/**
* thp_order - Order of a transparent huge page.
* @page: Head page of a transparent huge page.
*/
@@ -336,12 +327,6 @@ static inline struct list_head *page_deferred_list(struct page *page)
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })
-static inline struct page *thp_head(struct page *page)
-{
- VM_BUG_ON_PGFLAGS(PageTail(page), page);
- return page;
-}
-
static inline unsigned int thp_order(struct page *page)
{
VM_BUG_ON_PGFLAGS(PageTail(page), page);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 1faebe1cd0ed..00351ccb49a3 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -124,6 +124,7 @@ struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
void hugepage_put_subpool(struct hugepage_subpool *spool);
void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
+void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
loff_t *);
@@ -132,6 +133,10 @@ int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
loff_t *);
+int move_hugetlb_page_tables(struct vm_area_struct *vma,
+ struct vm_area_struct *new_vma,
+ unsigned long old_addr, unsigned long new_addr,
+ unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
struct page **, struct vm_area_struct **,
@@ -143,9 +148,6 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long start, unsigned long end,
struct page *ref_page);
-void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo(void);
@@ -218,6 +220,10 @@ static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}
+static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
+{
+}
+
static inline unsigned long hugetlb_total_pages(void)
{
return 0;
@@ -265,6 +271,16 @@ static inline int copy_hugetlb_page_range(struct mm_struct *dst,
return 0;
}
+static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
+ struct vm_area_struct *new_vma,
+ unsigned long old_addr,
+ unsigned long new_addr,
+ unsigned long len)
+{
+ BUG();
+ return 0;
+}
+
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
@@ -385,13 +401,6 @@ static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
BUG();
}
-static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
- struct vm_area_struct *vma, unsigned long start,
- unsigned long end, struct page *ref_page)
-{
- BUG();
-}
-
static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long address,
unsigned int flags)
@@ -468,8 +477,7 @@ static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
- struct ucounts **ucounts, int creat_flags,
- int page_size_log);
+ int creat_flags, int page_size_log);
static inline bool is_file_hugepages(struct file *file)
{
@@ -488,8 +496,7 @@ static inline struct hstate *hstate_inode(struct inode *i)
#define is_file_hugepages(file) false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
- struct ucounts **ucounts, int creat_flags,
- int page_size_log)
+ int creat_flags, int page_size_log)
{
return ERR_PTR(-ENOSYS);
}
@@ -596,6 +603,7 @@ struct hstate {
int next_nid_to_alloc;
int next_nid_to_free;
unsigned int order;
+ unsigned int demote_order;
unsigned long mask;
unsigned long max_huge_pages;
unsigned long nr_huge_pages;
@@ -605,6 +613,7 @@ struct hstate {
unsigned long nr_overcommit_huge_pages;
struct list_head hugepage_activelist;
struct list_head hugepage_freelists[MAX_NUMNODES];
+ unsigned int max_huge_pages_node[MAX_NUMNODES];
unsigned int nr_huge_pages_node[MAX_NUMNODES];
unsigned int free_huge_pages_node[MAX_NUMNODES];
unsigned int surplus_huge_pages_node[MAX_NUMNODES];
@@ -637,8 +646,9 @@ void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
unsigned long address, struct page *page);
/* arch callback */
-int __init __alloc_bootmem_huge_page(struct hstate *h);
-int __init alloc_bootmem_huge_page(struct hstate *h);
+int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
+int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
+bool __init hugetlb_node_alloc_supported(void);
void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index ddc8713ce57b..b823311eac79 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -803,6 +803,12 @@ struct vmbus_device {
#define VMBUS_DEFAULT_MAX_PKT_SIZE 4096
+struct vmbus_gpadl {
+ u32 gpadl_handle;
+ u32 size;
+ void *buffer;
+};
+
struct vmbus_channel {
struct list_head listentry;
@@ -822,7 +828,7 @@ struct vmbus_channel {
bool rescind_ref; /* got rescind msg, got channel reference */
struct completion rescind_event;
- u32 ringbuffer_gpadlhandle;
+ struct vmbus_gpadl ringbuffer_gpadlhandle;
/* Allocated memory for ring buffer */
struct page *ringbuffer_page;
@@ -1100,19 +1106,6 @@ void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
void (*chn_rescind_cb)(struct vmbus_channel *));
-/*
- * Check if sub-channels have already been offerred. This API will be useful
- * when the driver is unloaded after establishing sub-channels. In this case,
- * when the driver is re-loaded, the driver would have to check if the
- * subchannels have already been established before attempting to request
- * the creation of sub-channels.
- * This function returns TRUE to indicate that subchannels have already been
- * created.
- * This function should be invoked after setting the callback function for
- * sub-channel creation.
- */
-bool vmbus_are_subchannels_present(struct vmbus_channel *primary);
-
/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_page_buffer {
u16 type;
@@ -1192,10 +1185,10 @@ extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
void *kbuffer,
u32 size,
- u32 *gpadl_handle);
+ struct vmbus_gpadl *gpadl);
extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
- u32 gpadl_handle);
+ struct vmbus_gpadl *gpadl);
void vmbus_reset_channel_cb(struct vmbus_channel *channel);
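
With the handle wrapped in struct vmbus_gpadl, the establish/teardown pair now carries the buffer and size alongside the handle; a sketch of the updated calling convention (ring_buffer and ring_size assumed from the caller, error paths elided):

	struct vmbus_gpadl gpadl = {};
	int ret;

	ret = vmbus_establish_gpadl(channel, ring_buffer, ring_size, &gpadl);
	if (ret)
		return ret;

	/* ... use the channel ... */

	vmbus_teardown_gpadl(channel, &gpadl);	/* the struct identifies what to tear down */
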
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 2ce3efbe9198..16119ac1aa97 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -11,6 +11,7 @@
#define _LINUX_I2C_H
#include <linux/acpi.h> /* for acpi_handle */
+#include <linux/bits.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h> /* for struct device */
#include <linux/sched.h> /* for completion */
@@ -223,6 +224,15 @@ enum i2c_alert_protocol {
};
/**
+ * enum i2c_driver_flags - Flags for an I2C device driver
+ *
+ * @I2C_DRV_ACPI_WAIVE_D0_PROBE: Don't put the device in D0 state for probe
+ */
+enum i2c_driver_flags {
+ I2C_DRV_ACPI_WAIVE_D0_PROBE = BIT(0),
+};
+
+/**
* struct i2c_driver - represent an I2C device driver
* @class: What kind of i2c device we instantiate (for detect)
* @probe: Callback for device binding - soon to be deprecated
@@ -236,6 +246,7 @@ enum i2c_alert_protocol {
* @detect: Callback for device detection
* @address_list: The I2C addresses to probe (for detect)
* @clients: List of detected clients we created (for i2c-core use only)
+ * @flags: A bitmask of flags defined in &enum i2c_driver_flags
*
* The driver.owner field should be set to the module owner of this driver.
* The driver.name field should be set to the name of this driver.
@@ -294,6 +305,8 @@ struct i2c_driver {
int (*detect)(struct i2c_client *client, struct i2c_board_info *info);
const unsigned short *address_list;
struct list_head clients;
+
+ u32 flags;
};
#define to_i2c_driver(d) container_of(d, struct i2c_driver, driver)
@@ -1015,6 +1028,7 @@ u32 i2c_acpi_find_bus_speed(struct device *dev);
struct i2c_client *i2c_acpi_new_device(struct device *dev, int index,
struct i2c_board_info *info);
struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle);
+bool i2c_acpi_waive_d0_probe(struct device *dev);
#else
static inline bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
struct acpi_resource_i2c_serialbus **i2c)
@@ -1038,6 +1052,10 @@ static inline struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle ha
{
return NULL;
}
+static inline bool i2c_acpi_waive_d0_probe(struct device *dev)
+{
+ return false;
+}
#endif /* CONFIG_ACPI */
#endif /* _LINUX_I2C_H */
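
A driver that wants to skip the ACPI-mandated power-up to D0 during probe sets the new flag in its i2c_driver, and can consult i2c_acpi_waive_d0_probe() itself to decide when to power the device up; a hedged sketch (driver and callback names hypothetical):

	static struct i2c_driver acme_driver = {
		.driver = {
			.name = "acme-sensor",
		},
		.probe_new = acme_probe,
		.flags = I2C_DRV_ACPI_WAIVE_D0_PROBE,	/* core will not force D0 before probe */
	};
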
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 694264503119..11d7af260f20 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1143,7 +1143,7 @@ struct ieee80211_mgmt {
__le16 auth_transaction;
__le16 status_code;
/* possibly followed by Challenge text */
- u8 variable[0];
+ u8 variable[];
} __packed auth;
struct {
__le16 reason_code;
@@ -1152,26 +1152,26 @@ struct ieee80211_mgmt {
__le16 capab_info;
__le16 listen_interval;
/* followed by SSID and Supported rates */
- u8 variable[0];
+ u8 variable[];
} __packed assoc_req;
struct {
__le16 capab_info;
__le16 status_code;
__le16 aid;
/* followed by Supported rates */
- u8 variable[0];
+ u8 variable[];
} __packed assoc_resp, reassoc_resp;
struct {
__le16 capab_info;
__le16 status_code;
- u8 variable[0];
+ u8 variable[];
} __packed s1g_assoc_resp, s1g_reassoc_resp;
struct {
__le16 capab_info;
__le16 listen_interval;
u8 current_ap[ETH_ALEN];
/* followed by SSID and Supported rates */
- u8 variable[0];
+ u8 variable[];
} __packed reassoc_req;
struct {
__le16 reason_code;
@@ -1182,11 +1182,11 @@ struct ieee80211_mgmt {
__le16 capab_info;
/* followed by some of SSID, Supported rates,
* FH Params, DS Params, CF Params, IBSS Params, TIM */
- u8 variable[0];
+ u8 variable[];
} __packed beacon;
struct {
/* only variable items: SSID, Supported rates */
- u8 variable[0];
+ DECLARE_FLEX_ARRAY(u8, variable);
} __packed probe_req;
struct {
__le64 timestamp;
@@ -1194,7 +1194,7 @@ struct ieee80211_mgmt {
__le16 capab_info;
/* followed by some of SSID, Supported rates,
* FH Params, DS Params, CF Params, IBSS Params */
- u8 variable[0];
+ u8 variable[];
} __packed probe_resp;
struct {
u8 category;
@@ -1203,16 +1203,16 @@ struct ieee80211_mgmt {
u8 action_code;
u8 dialog_token;
u8 status_code;
- u8 variable[0];
+ u8 variable[];
} __packed wme_action;
struct{
u8 action_code;
- u8 variable[0];
+ u8 variable[];
} __packed chan_switch;
struct{
u8 action_code;
struct ieee80211_ext_chansw_ie data;
- u8 variable[0];
+ u8 variable[];
} __packed ext_chan_switch;
struct{
u8 action_code;
@@ -1228,7 +1228,7 @@ struct ieee80211_mgmt {
__le16 timeout;
__le16 start_seq_num;
/* followed by BA Extension */
- u8 variable[0];
+ u8 variable[];
} __packed addba_req;
struct{
u8 action_code;
@@ -1244,11 +1244,11 @@ struct ieee80211_mgmt {
} __packed delba;
struct {
u8 action_code;
- u8 variable[0];
+ u8 variable[];
} __packed self_prot;
struct{
u8 action_code;
- u8 variable[0];
+ u8 variable[];
} __packed mesh_action;
struct {
u8 action;
@@ -1292,7 +1292,7 @@ struct ieee80211_mgmt {
u8 toa[6];
__le16 tod_error;
__le16 toa_error;
- u8 variable[0];
+ u8 variable[];
} __packed ftm;
struct {
u8 action_code;
@@ -1988,6 +1988,44 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
int mcs, bool ext_nss_bw_capable,
unsigned int max_vht_nss);
+/**
+ * enum ieee80211_ap_reg_power - regulatory power for an Access Point
+ *
+ * @IEEE80211_REG_UNSET_AP: Access Point has no regulatory power mode
+ * @IEEE80211_REG_LPI_AP: Indoor Access Point
+ * @IEEE80211_REG_SP_AP: Standard power Access Point
+ * @IEEE80211_REG_VLP_AP: Very low power Access Point
+ * @IEEE80211_REG_AP_POWER_AFTER_LAST: internal
+ * @IEEE80211_REG_AP_POWER_MAX: maximum value
+ */
+enum ieee80211_ap_reg_power {
+ IEEE80211_REG_UNSET_AP,
+ IEEE80211_REG_LPI_AP,
+ IEEE80211_REG_SP_AP,
+ IEEE80211_REG_VLP_AP,
+ IEEE80211_REG_AP_POWER_AFTER_LAST,
+ IEEE80211_REG_AP_POWER_MAX =
+ IEEE80211_REG_AP_POWER_AFTER_LAST - 1,
+};
+
+/**
+ * enum ieee80211_client_reg_power - regulatory power for a client
+ *
+ * @IEEE80211_REG_UNSET_CLIENT: Client has no regulatory power mode
+ * @IEEE80211_REG_DEFAULT_CLIENT: Default Client
+ * @IEEE80211_REG_SUBORDINATE_CLIENT: Subordinate Client
+ * @IEEE80211_REG_CLIENT_POWER_AFTER_LAST: internal
+ * @IEEE80211_REG_CLIENT_POWER_MAX: maximum value
+ */
+enum ieee80211_client_reg_power {
+ IEEE80211_REG_UNSET_CLIENT,
+ IEEE80211_REG_DEFAULT_CLIENT,
+ IEEE80211_REG_SUBORDINATE_CLIENT,
+ IEEE80211_REG_CLIENT_POWER_AFTER_LAST,
+ IEEE80211_REG_CLIENT_POWER_MAX =
+ IEEE80211_REG_CLIENT_POWER_AFTER_LAST - 1,
+};
+
/* 802.11ax HE MAC capabilities */
#define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01
#define IEEE80211_HE_MAC_CAP0_TWT_REQ 0x02
@@ -2084,6 +2122,7 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
#define IEEE80211_HE_VHT_MAX_AMPDU_FACTOR 20
#define IEEE80211_HE_HT_MAX_AMPDU_FACTOR 16
+#define IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR 13
/* 802.11ax HE PHY capabilities */
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02
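
The probe_req member above needs DECLARE_FLEX_ARRAY() because C forbids a flexible array member as the sole member of a struct; the macro wraps the array so the declaration stays legal while preserving the zero-size layout. Roughly, the idiom expands along these lines (a simplified sketch, not the exact macro body):

	struct {
		struct { } __empty_variable;	/* keeps the struct formally non-empty */
		u8 variable[];			/* the real flexible array */
	};
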
diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
index b6928ac5c63d..418b1307d3f2 100644
--- a/include/linux/iio/buffer.h
+++ b/include/linux/iio/buffer.h
@@ -11,8 +11,15 @@
struct iio_buffer;
+enum iio_buffer_direction {
+ IIO_BUFFER_DIRECTION_IN,
+ IIO_BUFFER_DIRECTION_OUT,
+};
+
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data);
+int iio_pop_from_buffer(struct iio_buffer *buffer, void *data);
+
/**
* iio_push_to_buffers_with_timestamp() - push data and timestamp to buffers
* @indio_dev: iio_dev structure for device.
@@ -38,6 +45,10 @@ static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev,
return iio_push_to_buffers(indio_dev, data);
}
+int iio_push_to_buffers_with_ts_unaligned(struct iio_dev *indio_dev,
+ const void *data, size_t data_sz,
+ int64_t timestamp);
+
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
const unsigned long *mask);
diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h
index 245b32918ae1..e2ca8ea23e19 100644
--- a/include/linux/iio/buffer_impl.h
+++ b/include/linux/iio/buffer_impl.h
@@ -7,6 +7,7 @@
#ifdef CONFIG_IIO_BUFFER
#include <uapi/linux/iio/buffer.h>
+#include <linux/iio/buffer.h>
struct iio_dev;
struct iio_buffer;
@@ -23,6 +24,10 @@ struct iio_buffer;
* @read: try to get a specified number of bytes (must exist)
* @data_available: indicates how much data is available for reading from
* the buffer.
+ * @remove_from: remove scan from buffer. Drivers should call this to
+ * remove a scan from a buffer.
+ * @write: try to write a number of bytes
+ * @space_available: returns the number of bytes available in a buffer
* @request_update: if a parameter change has been marked, update underlying
* storage.
* @set_bytes_per_datum:set number of bytes per datum
@@ -49,6 +54,9 @@ struct iio_buffer_access_funcs {
int (*store_to)(struct iio_buffer *buffer, const void *data);
int (*read)(struct iio_buffer *buffer, size_t n, char __user *buf);
size_t (*data_available)(struct iio_buffer *buffer);
+ int (*remove_from)(struct iio_buffer *buffer, void *data);
+ int (*write)(struct iio_buffer *buffer, size_t n, const char __user *buf);
+ size_t (*space_available)(struct iio_buffer *buffer);
int (*request_update)(struct iio_buffer *buffer);
@@ -80,6 +88,9 @@ struct iio_buffer {
/** @bytes_per_datum: Size of individual datum including timestamp. */
size_t bytes_per_datum;
+ /* @direction: Direction of the data stream (in/out). */
+ enum iio_buffer_direction direction;
+
/**
* @access: Buffer access functions associated with the
* implementation.
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 8bdbaf3f3796..22f67845cdd3 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -220,7 +220,6 @@ struct st_sensor_settings {
/**
* struct st_sensor_data - ST sensor device status
- * @dev: Pointer to instance of struct device (I2C or SPI).
* @trig: The trigger in use by the core driver.
* @mount_matrix: The mounting matrix of the sensor.
* @sensor_settings: Pointer to the specific sensor settings in use.
@@ -240,7 +239,6 @@ struct st_sensor_settings {
* @buffer_data: Data used by buffer part.
*/
struct st_sensor_data {
- struct device *dev;
struct iio_trigger *trig;
struct iio_mount_matrix mount_matrix;
struct st_sensor_settings *sensor_settings;
@@ -273,7 +271,6 @@ irqreturn_t st_sensors_trigger_handler(int irq, void *p);
int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
const struct iio_trigger_ops *trigger_ops);
-void st_sensors_deallocate_trigger(struct iio_dev *indio_dev);
int st_sensors_validate_device(struct iio_trigger *trig,
struct iio_dev *indio_dev);
#else
@@ -282,10 +279,6 @@ static inline int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
{
return 0;
}
-static inline void st_sensors_deallocate_trigger(struct iio_dev *indio_dev)
-{
- return;
-}
#define st_sensors_validate_device NULL
#endif
@@ -298,8 +291,6 @@ int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable);
int st_sensors_power_enable(struct iio_dev *indio_dev);
-void st_sensors_power_disable(struct iio_dev *indio_dev);
-
int st_sensors_debugfs_reg_access(struct iio_dev *indio_dev,
unsigned reg, unsigned writeval,
unsigned *readval);
@@ -330,21 +321,17 @@ void st_sensors_dev_name_probe(struct device *dev, char *name, int len);
/* Accelerometer */
const struct st_sensor_settings *st_accel_get_settings(const char *name);
int st_accel_common_probe(struct iio_dev *indio_dev);
-void st_accel_common_remove(struct iio_dev *indio_dev);
/* Gyroscope */
const struct st_sensor_settings *st_gyro_get_settings(const char *name);
int st_gyro_common_probe(struct iio_dev *indio_dev);
-void st_gyro_common_remove(struct iio_dev *indio_dev);
/* Magnetometer */
const struct st_sensor_settings *st_magn_get_settings(const char *name);
int st_magn_common_probe(struct iio_dev *indio_dev);
-void st_magn_common_remove(struct iio_dev *indio_dev);
/* Pressure */
const struct st_sensor_settings *st_press_get_settings(const char *name);
int st_press_common_probe(struct iio_dev *indio_dev);
-void st_press_common_remove(struct iio_dev *indio_dev);
#endif /* ST_SENSORS_H */
diff --git a/include/linux/iio/driver.h b/include/linux/iio/driver.h
index 36de60a5da7a..7a157ed218f6 100644
--- a/include/linux/iio/driver.h
+++ b/include/linux/iio/driver.h
@@ -8,6 +8,7 @@
#ifndef _IIO_INKERN_H_
#define _IIO_INKERN_H_
+struct device;
struct iio_dev;
struct iio_map;
@@ -26,4 +27,17 @@ int iio_map_array_register(struct iio_dev *indio_dev,
*/
int iio_map_array_unregister(struct iio_dev *indio_dev);
+/**
+ * devm_iio_map_array_register - device-managed version of iio_map_array_register
+ * @dev: Device object to which to bind the unwinding of this registration
+ * @indio_dev: Pointer to the iio_dev structure
+ * @maps: Pointer to an IIO map object which is to be registered to this IIO device
+ *
+ * This function will call iio_map_array_register() to register an IIO map object
+ * and will also hook a callback to the iio_map_array_unregister() function to
+ * handle de-registration of the IIO map object when the device's refcount goes to
+ * zero.
+ */
+int devm_iio_map_array_register(struct device *dev, struct iio_dev *indio_dev, struct iio_map *maps);
+
#endif
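
Usage mirrors the unmanaged iio_map_array_register(), minus the explicit unregister on the error and remove paths; a sketch for a probe function (the map table is hypothetical):

	static struct iio_map acme_maps[] = {
		{ .consumer_dev_name = "acme-hwmon", .consumer_channel = "vin" },
		{ /* sentinel */ }
	};

	ret = devm_iio_map_array_register(dev, indio_dev, acme_maps);
	if (ret)
		return ret;
	/* iio_map_array_unregister() now runs automatically on detach */
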
diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h
index c9504e9da571..2be12b7b5dc5 100644
--- a/include/linux/iio/iio-opaque.h
+++ b/include/linux/iio/iio-opaque.h
@@ -23,6 +23,8 @@
* @groupcounter: index of next attribute group
* @legacy_scan_el_group: attribute group for legacy scan elements attribute group
* @legacy_buffer_group: attribute group for legacy buffer attributes group
+ * @bounce_buffer: for devices that call iio_push_to_buffers_with_ts_unaligned()
+ * @bounce_buffer_size: size of the currently allocated bounce buffer
* @scan_index_timestamp: cache of the index to the timestamp
* @clock_id: timestamping clock posix identifier
* @chrdev: associated character device
@@ -50,6 +52,8 @@ struct iio_dev_opaque {
int groupcounter;
struct attribute_group legacy_scan_el_group;
struct attribute_group legacy_buffer_group;
+ void *bounce_buffer;
+ size_t bounce_buffer_size;
unsigned int scan_index_timestamp;
clockid_t clock_id;
diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h
index cf49997d5903..7c02f5292eea 100644
--- a/include/linux/iio/imu/adis.h
+++ b/include/linux/iio/imu/adis.h
@@ -49,6 +49,7 @@ struct adis_timeout {
* @status_error_mask: Bitmask of errors supported by the device
* @timeouts: Chip specific delays
* @enable_irq: Hook for ADIS devices that have a special IRQ enable/disable
+ * @unmasked_drdy: True for devices that cannot mask/unmask the data ready pin
* @has_paging: True if ADIS device has paged registers
* @burst_reg_cmd: Register command that triggers burst
* @burst_len: Burst size in the SPI RX buffer. If @burst_max_len is defined,
@@ -78,6 +79,7 @@ struct adis_data {
unsigned int status_error_mask;
int (*enable_irq)(struct adis *adis, bool enable);
+ bool unmasked_drdy;
bool has_paging;
diff --git a/include/linux/iio/triggered_buffer.h b/include/linux/iio/triggered_buffer.h
index 7f154d1f8739..7490b05fc5b2 100644
--- a/include/linux/iio/triggered_buffer.h
+++ b/include/linux/iio/triggered_buffer.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_IIO_TRIGGERED_BUFFER_H_
#define _LINUX_IIO_TRIGGERED_BUFFER_H_
+#include <linux/iio/buffer.h>
#include <linux/interrupt.h>
struct attribute;
@@ -11,21 +12,27 @@ struct iio_buffer_setup_ops;
int iio_triggered_buffer_setup_ext(struct iio_dev *indio_dev,
irqreturn_t (*h)(int irq, void *p),
irqreturn_t (*thread)(int irq, void *p),
+ enum iio_buffer_direction direction,
const struct iio_buffer_setup_ops *setup_ops,
const struct attribute **buffer_attrs);
void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev);
#define iio_triggered_buffer_setup(indio_dev, h, thread, setup_ops) \
- iio_triggered_buffer_setup_ext((indio_dev), (h), (thread), (setup_ops), NULL)
+ iio_triggered_buffer_setup_ext((indio_dev), (h), (thread), \
+ IIO_BUFFER_DIRECTION_IN, (setup_ops), \
+ NULL)
int devm_iio_triggered_buffer_setup_ext(struct device *dev,
struct iio_dev *indio_dev,
irqreturn_t (*h)(int irq, void *p),
irqreturn_t (*thread)(int irq, void *p),
+ enum iio_buffer_direction direction,
const struct iio_buffer_setup_ops *ops,
const struct attribute **buffer_attrs);
#define devm_iio_triggered_buffer_setup(dev, indio_dev, h, thread, setup_ops) \
- devm_iio_triggered_buffer_setup_ext((dev), (indio_dev), (h), (thread), (setup_ops), NULL)
+ devm_iio_triggered_buffer_setup_ext((dev), (indio_dev), (h), (thread), \
+ IIO_BUFFER_DIRECTION_IN, \
+ (setup_ops), NULL)
#endif
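
Existing callers keep their behaviour through the rewritten wrappers, which hardcode IIO_BUFFER_DIRECTION_IN; a driver exposing an output buffer would call the extended form directly, e.g. (a sketch with hypothetical handler and ops names):

	ret = devm_iio_triggered_buffer_setup_ext(dev, indio_dev,
						  NULL, acme_trigger_handler,
						  IIO_BUFFER_DIRECTION_OUT,
						  &acme_setup_ops, NULL);
	if (ret)
		return ret;
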
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index a038feb63f23..518b484a7f07 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -133,6 +133,8 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
#define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE)
#define IN_DEV_ARP_IGNORE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_IGNORE)
#define IN_DEV_ARP_NOTIFY(in_dev) IN_DEV_MAXCONF((in_dev), ARP_NOTIFY)
+#define IN_DEV_ARP_EVICT_NOCARRIER(in_dev) IN_DEV_ANDCONF((in_dev), \
+ ARP_EVICT_NOCARRIER)
struct in_ifaddr {
struct hlist_node hash;
diff --git a/include/linux/input/cy8ctmg110_pdata.h b/include/linux/input/cy8ctmg110_pdata.h
deleted file mode 100644
index ee1d44545f30..000000000000
--- a/include/linux/input/cy8ctmg110_pdata.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_CY8CTMG110_PDATA_H
-#define _LINUX_CY8CTMG110_PDATA_H
-
-struct cy8ctmg110_pdata
-{
- int reset_pin; /* Reset pin is wired to this GPIO (optional) */
-};
-
-#endif
diff --git a/include/linux/instruction_pointer.h b/include/linux/instruction_pointer.h
new file mode 100644
index 000000000000..cda1f706eaeb
--- /dev/null
+++ b/include/linux/instruction_pointer.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_INSTRUCTION_POINTER_H
+#define _LINUX_INSTRUCTION_POINTER_H
+
+#define _RET_IP_ (unsigned long)__builtin_return_address(0)
+#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
+
+#endif /* _LINUX_INSTRUCTION_POINTER_H */
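
The two macros move verbatim out of kernel.h so that code needing only an instruction-pointer snapshot avoids the heavy kernel.h include; typical use is unchanged, for example:

	#include <linux/instruction_pointer.h>

	pr_debug("entered at %pS, called from %pS\n",
		 (void *)_THIS_IP_, (void *)_RET_IP_);
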
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 05a65eb155f7..69230fd695ea 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -517,9 +517,6 @@ struct context_entry {
u64 hi;
};
-/* si_domain contains mulitple devices */
-#define DOMAIN_FLAG_STATIC_IDENTITY BIT(0)
-
/*
* When VT-d works in the scalable mode, it allows DMA translation to
* happen through either first level or second level page table. This
@@ -708,9 +705,15 @@ static inline bool dma_pte_superpage(struct dma_pte *pte)
return (pte->val & DMA_PTE_LARGE_PAGE);
}
-static inline int first_pte_in_page(struct dma_pte *pte)
+static inline bool first_pte_in_page(struct dma_pte *pte)
+{
+ return IS_ALIGNED((unsigned long)pte, VTD_PAGE_SIZE);
+}
+
+static inline int nr_pte_to_next_page(struct dma_pte *pte)
{
- return !((unsigned long)pte & ~VTD_PAGE_MASK);
+ return first_pte_in_page(pte) ? BIT_ULL(VTD_STRIDE_SHIFT) :
+ (struct dma_pte *)ALIGN((unsigned long)pte, VTD_PAGE_SIZE) - pte;
}
extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
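
nr_pte_to_next_page() gives walkers the number of PTE slots left before the next page-table page, which lets loops batch work up to that boundary; a hedged sketch of the loop shape it enables (variable names assumed):

	while (nr_pages) {
		int count = min_t(int, nr_pages, nr_pte_to_next_page(pte));

		nr_pages -= count;
		pte += count;
		/* when pte reaches a page boundary, refetch it from the
		 * next page-table page before continuing */
	}
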
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index e9743cfd8585..66a774d2710e 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -132,13 +132,7 @@ io_mapping_init_wc(struct io_mapping *iomap,
iomap->base = base;
iomap->size = size;
-#if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */
- iomap->prot = pgprot_noncached_wc(PAGE_KERNEL);
-#elif defined(pgprot_writecombine)
iomap->prot = pgprot_writecombine(PAGE_KERNEL);
-#else
- iomap->prot = pgprot_noncached(PAGE_KERNEL);
-#endif
return iomap;
}
diff --git a/include/linux/io.h b/include/linux/io.h
index 9595151d800d..5fc800390fe4 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -132,6 +132,8 @@ static inline int arch_phys_wc_index(int handle)
#endif
#endif
+int devm_arch_phys_wc_add(struct device *dev, unsigned long base, unsigned long size);
+
enum {
/* See memremap() kernel-doc for usage description... */
MEMREMAP_WB = 1 << 0,
@@ -166,4 +168,7 @@ static inline void arch_io_free_memtype_wc(resource_size_t base,
}
#endif
+int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start,
+ resource_size_t size);
+
#endif /* _LINUX_IO_H */
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 24f8489583ca..6d1b08d0ae93 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -313,8 +313,8 @@ int iomap_writepages(struct address_space *mapping,
struct iomap_dio_ops {
int (*end_io)(struct kiocb *iocb, ssize_t size, int error,
unsigned flags);
- blk_qc_t (*submit_io)(const struct iomap_iter *iter, struct bio *bio,
- loff_t file_offset);
+ void (*submit_io)(const struct iomap_iter *iter, struct bio *bio,
+ loff_t file_offset);
};
/*
@@ -330,14 +330,20 @@ struct iomap_dio_ops {
*/
#define IOMAP_DIO_OVERWRITE_ONLY (1 << 1)
+/*
+ * When a page fault occurs, return a partial synchronous result and allow
+ * the caller to retry the rest of the operation after dealing with the page
+ * fault.
+ */
+#define IOMAP_DIO_PARTIAL (1 << 2)
+
ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
- unsigned int dio_flags);
+ unsigned int dio_flags, size_t done_before);
struct iomap_dio *__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
- unsigned int dio_flags);
+ unsigned int dio_flags, size_t done_before);
ssize_t iomap_dio_complete(struct iomap_dio *dio);
-int iomap_dio_iopoll(struct kiocb *kiocb, bool spin);
#ifdef CONFIG_SWAP
struct file;
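
Filesystems that can cope with partial completion pass the bytes already transferred through the new done_before argument and set IOMAP_DIO_PARTIAL; a compressed sketch of the retry shape, modelled loosely on the conversions in the same series (helper names assumed):

	size_t done = 0;	/* bytes completed by earlier partial attempts */
	ssize_t ret;

retry:
	ret = iomap_dio_rw(iocb, iter, &fs_iomap_ops, NULL,
			   IOMAP_DIO_PARTIAL, done);
	if (ret > 0)
		done = ret;	/* the return value already includes done_before */
	if (ret == -EFAULT && iov_iter_count(iter) &&
	    fault_in_iov_iter_writeable(iter, PAGE_SIZE) == 0)
		goto retry;	/* fault handled, resume the remainder */
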
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 52850a02a3d0..163831a087ef 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -335,4 +335,7 @@ extern int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data);
#define GET_DEVICE_ID_MAX_RETRY 5
+/* Helper function for computing the IPMB checksum of some data. */
+unsigned char ipmb_checksum(unsigned char *data, int size);
+
#endif /* __LINUX_IPMI_H */
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index deec18b8944a..9277d21c2690 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -39,6 +39,59 @@ struct ipmi_smi;
#define IPMI_WATCH_MASK_CHECK_COMMANDS (1 << 2)
/*
+ * SMI messages
+ *
+ * When communicating with an SMI, messages come in two formats:
+ *
+ * * Normal (to a BMC over a BMC interface)
+ *
+ * * IPMB (over an IPMB to another MC)
+ *
+ * When normal, commands are sent using the format defined by a
+ * standard message over KCS (NetFn must be even):
+ *
+ * +-----------+-----+------+
+ * | NetFn/LUN | Cmd | Data |
+ * +-----------+-----+------+
+ *
+ * And responses, similarly, with a completion code added (NetFn must
+ * be odd):
+ *
+ * +-----------+-----+------+------+
+ * | NetFn/LUN | Cmd | CC | Data |
+ * +-----------+-----+------+------+
+ *
+ * With normal messages, only commands are sent and only responses are
+ * received.
+ *
+ * In IPMB mode, we are acting as an IPMB device. Commands will be in
+ * the following format (NetFn must be even):
+ *
+ * +-------------+------+-------------+-----+------+
+ * | NetFn/rsLUN | Addr | rqSeq/rqLUN | Cmd | Data |
+ * +-------------+------+-------------+-----+------+
+ *
+ * Responses use the following format:
+ *
+ * +-------------+------+-------------+-----+------+------+
+ * | NetFn/rqLUN | Addr | rqSeq/rsLUN | Cmd | CC | Data |
+ * +-------------+------+-------------+-----+------+------+
+ *
+ * This is similar to the format defined in the IPMB manual section
+ * 2.11.1 with the checksums and the first address removed. Also, the
+ * address is always the remote address.
+ *
+ * IPMB messages can be commands and responses in both directions.
+ * Received commands are handled as received commands from the message
+ * queue.
+ */
+
+enum ipmi_smi_msg_type {
+ IPMI_SMI_MSG_TYPE_NORMAL = 0,
+ IPMI_SMI_MSG_TYPE_IPMB_DIRECT
+};
+
+/*
* Messages to/from the lower layer. The smi interface will take one
* of these to send. After the send has occurred and a response has
* been received, it will report this same data structure back up to
@@ -54,6 +107,8 @@ struct ipmi_smi;
struct ipmi_smi_msg {
struct list_head link;
+ enum ipmi_smi_msg_type type;
+
long msgid;
void *user_data;
@@ -73,6 +128,10 @@ struct ipmi_smi_msg {
struct ipmi_smi_handlers {
struct module *owner;
+ /* Capabilities of the SMI. */
+#define IPMI_SMI_CAN_HANDLE_IPMB_DIRECT (1 << 0)
+ unsigned int flags;
+
/*
* The low-level interface cannot start sending messages to
* the upper layer until this function is called. This may
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index ef4a69865737..20c1f968da7c 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -79,6 +79,7 @@ struct ipv6_devconf {
__u32 ioam6_id;
__u32 ioam6_id_wide;
__u8 ioam6_enabled;
+ __u8 ndisc_evict_nocarrier;
struct ctl_table_header *sysctl_header;
};
@@ -282,7 +283,6 @@ struct ipv6_pinfo {
__be32 rcv_flowinfo;
__u32 dst_cookie;
- __u32 rx_dst_cookie;
struct ipv6_mc_socklist __rcu *ipv6_mc_list;
struct ipv6_ac_socklist *ipv6_ac_list;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index c8293c817646..848e1e12c5c6 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -524,9 +524,10 @@ struct irq_chip {
void (*irq_bus_lock)(struct irq_data *data);
void (*irq_bus_sync_unlock)(struct irq_data *data);
+#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
void (*irq_cpu_online)(struct irq_data *data);
void (*irq_cpu_offline)(struct irq_data *data);
-
+#endif
void (*irq_suspend)(struct irq_data *data);
void (*irq_resume)(struct irq_data *data);
void (*irq_pm_shutdown)(struct irq_data *data);
@@ -606,8 +607,10 @@ struct irqaction;
extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
+#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
extern void irq_cpu_online(void);
extern void irq_cpu_offline(void);
+#endif
extern int irq_set_affinity_locked(struct irq_data *data,
const struct cpumask *cpumask, bool force);
extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info);
@@ -1261,6 +1264,7 @@ int __init set_handle_irq(void (*handle_irq)(struct pt_regs *));
* top-level IRQ handler.
*/
extern void (*handle_arch_irq)(struct pt_regs *) __ro_after_init;
+asmlinkage void generic_handle_arch_irq(struct pt_regs *regs);
#else
#ifndef set_handle_irq
#define set_handle_irq(handle_irq) \
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index ec2a47a81e42..8cd11a223260 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -3,6 +3,7 @@
#define _LINUX_IRQ_WORK_H
#include <linux/smp_types.h>
+#include <linux/rcuwait.h>
/*
* An entry can be in one of four states:
@@ -16,11 +17,13 @@
struct irq_work {
struct __call_single_node node;
void (*func)(struct irq_work *);
+ struct rcuwait irqwait;
};
#define __IRQ_WORK_INIT(_func, _flags) (struct irq_work){ \
.node = { .u_flags = (_flags), }, \
.func = (_func), \
+ .irqwait = __RCUWAIT_INITIALIZER(irqwait), \
}
#define IRQ_WORK_INIT(_func) __IRQ_WORK_INIT(_func, 0)
@@ -46,6 +49,11 @@ static inline bool irq_work_is_busy(struct irq_work *work)
return atomic_read(&work->node.a_flags) & IRQ_WORK_BUSY;
}
+static inline bool irq_work_is_hard(struct irq_work *work)
+{
+ return atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ;
+}
+
bool irq_work_queue(struct irq_work *work);
bool irq_work_queue_on(struct irq_work *work, int cpu);
diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h
index 67351aac65ef..3a091d0710ae 100644
--- a/include/linux/irqchip.h
+++ b/include/linux/irqchip.h
@@ -14,8 +14,15 @@
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/platform_device.h>
+/* Undefined on purpose */
+extern of_irq_init_cb_t typecheck_irq_init_cb;
+
+#define typecheck_irq_init_cb(fn) \
+ (__typecheck(typecheck_irq_init_cb, &fn) ? fn : fn)
+
/*
* This macro must be used by the different irqchip drivers to declare
* the association between their DT compatible string and their
@@ -23,24 +30,27 @@
*
* @name: name that must be unique across all IRQCHIP_DECLARE of the
* same file.
- * @compstr: compatible string of the irqchip driver
+ * @compat: compatible string of the irqchip driver
* @fn: initialization function
*/
-#define IRQCHIP_DECLARE(name, compat, fn) OF_DECLARE_2(irqchip, name, compat, fn)
+#define IRQCHIP_DECLARE(name, compat, fn) \
+ OF_DECLARE_2(irqchip, name, compat, typecheck_irq_init_cb(fn))
extern int platform_irqchip_probe(struct platform_device *pdev);
#define IRQCHIP_PLATFORM_DRIVER_BEGIN(drv_name) \
static const struct of_device_id drv_name##_irqchip_match_table[] = {
-#define IRQCHIP_MATCH(compat, fn) { .compatible = compat, .data = fn },
+#define IRQCHIP_MATCH(compat, fn) { .compatible = compat, \
+ .data = typecheck_irq_init_cb(fn), },
#define IRQCHIP_PLATFORM_DRIVER_END(drv_name) \
{}, \
}; \
MODULE_DEVICE_TABLE(of, drv_name##_irqchip_match_table); \
-static struct platform_driver drv_name##_driver = { \
- .probe = platform_irqchip_probe, \
+static struct platform_driver drv_name##_driver = { \
+ .probe = IS_ENABLED(CONFIG_IRQCHIP) ? \
+ platform_irqchip_probe : NULL, \
.driver = { \
.name = #drv_name, \
.owner = THIS_MODULE, \
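
typecheck_irq_init_cb() is a compile-time guard: the extern declaration is never defined, and __typecheck() compares the supplied function against of_irq_init_cb_t, so a callback with the wrong signature now breaks the build instead of being silently cast. For example (hypothetical driver):

	static int __init acme_intc_init(struct device_node *node,
					 struct device_node *parent);

	IRQCHIP_DECLARE(acme_intc, "acme,intc", acme_intc_init);
	/* a callback missing the parent argument would now fail to compile */
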
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 59aea39785bf..93d270ca0c56 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -168,14 +168,7 @@ int generic_handle_irq(unsigned int irq);
* conversion failed.
*/
int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq);
-
-#ifdef CONFIG_HANDLE_DOMAIN_IRQ
-int handle_domain_irq(struct irq_domain *domain,
- unsigned int hwirq, struct pt_regs *regs);
-
-int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq,
- struct pt_regs *regs);
-#endif
+int generic_handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq);
#endif
/* Test to see if a driver has successfully requested an irq */
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 9ee238ad29ce..553da4899f55 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -64,6 +64,10 @@ struct irq_fwspec {
u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS];
};
+/* Conversion function from of_phandle_args fields to fwspec */
+void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args,
+ unsigned int count, struct irq_fwspec *fwspec);
+
/*
* Should several domains have the same device node, but serve
* different purposes (for example one domain is for PCI/MSI, and the
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index a1d6fc82d7f0..4176c7eca7b5 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -24,25 +24,16 @@
struct cred;
struct module;
-static inline int is_kernel_inittext(unsigned long addr)
-{
- if (addr >= (unsigned long)_sinittext
- && addr <= (unsigned long)_einittext)
- return 1;
- return 0;
-}
-
static inline int is_kernel_text(unsigned long addr)
{
- if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
- arch_is_kernel_text(addr))
+ if (__is_kernel_text(addr))
return 1;
return in_gate_area_no_mm(addr);
}
static inline int is_kernel(unsigned long addr)
{
- if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
+ if (__is_kernel(addr))
return 1;
return in_gate_area_no_mm(addr);
}
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index dd874a1ee862..d8783b682669 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -89,7 +89,7 @@ static __always_inline bool kasan_enabled(void)
return static_branch_likely(&kasan_flag_enabled);
}
-static inline bool kasan_has_integrated_init(void)
+static inline bool kasan_hw_tags_enabled(void)
{
return kasan_enabled();
}
@@ -104,7 +104,7 @@ static inline bool kasan_enabled(void)
return IS_ENABLED(CONFIG_KASAN);
}
-static inline bool kasan_has_integrated_init(void)
+static inline bool kasan_hw_tags_enabled(void)
{
return false;
}
@@ -125,6 +125,11 @@ static __always_inline void kasan_free_pages(struct page *page,
#endif /* CONFIG_KASAN_HW_TAGS */
+static inline bool kasan_has_integrated_init(void)
+{
+ return kasan_hw_tags_enabled();
+}
+
#ifdef CONFIG_KASAN
struct kasan_cache {
@@ -370,12 +375,14 @@ static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
+void kasan_record_aux_stack_noalloc(void *ptr);
#else /* CONFIG_KASAN_GENERIC */
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
+static inline void kasan_record_aux_stack_noalloc(void *ptr) {}
#endif /* CONFIG_KASAN_GENERIC */
@@ -434,6 +441,8 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
unsigned long free_region_start,
unsigned long free_region_end);
+void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
+
#else /* CONFIG_KASAN_VMALLOC */
static inline int kasan_populate_vmalloc(unsigned long start,
@@ -451,6 +460,10 @@ static inline void kasan_release_vmalloc(unsigned long start,
unsigned long free_region_start,
unsigned long free_region_end) {}
+static inline void kasan_populate_early_vm_area_shadow(void *start,
+ unsigned long size)
+{ }
+
#endif /* CONFIG_KASAN_VMALLOC */
#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
index 9fd0ad80fef6..5f5965246877 100644
--- a/include/linux/kcsan-checks.h
+++ b/include/linux/kcsan-checks.h
@@ -100,9 +100,12 @@ void kcsan_set_access_mask(unsigned long mask);
/* Scoped access information. */
struct kcsan_scoped_access {
struct list_head list;
+ /* Access information. */
const volatile void *ptr;
size_t size;
int type;
+ /* Location where scoped access was set up. */
+ unsigned long ip;
};
/*
* Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 2776423a587e..77755ac3e189 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -9,6 +9,7 @@
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/compiler.h>
+#include <linux/container_of.h>
#include <linux/bitops.h>
#include <linux/kstrtox.h>
#include <linux/log2.h>
@@ -19,6 +20,7 @@
#include <linux/printk.h>
#include <linux/build_bug.h>
#include <linux/static_call_types.h>
+#include <linux/instruction_pointer.h>
#include <asm/byteorder.h>
#include <uapi/linux/kernel.h>
@@ -52,11 +54,6 @@
} \
)
-#define typeof_member(T, m) typeof(((T*)0)->m)
-
-#define _RET_IP_ (unsigned long)__builtin_return_address(0)
-#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
-
/**
* upper_32_bits - return bits 32-63 of a number
* @n: the number we're accessing
@@ -88,7 +85,7 @@
struct completion;
struct user;
-#ifdef CONFIG_PREEMPT_VOLUNTARY
+#ifdef CONFIG_PREEMPT_VOLUNTARY_BUILD
extern int __cond_resched(void);
# define might_resched() __cond_resched()
@@ -111,8 +108,8 @@ static __always_inline void might_resched(void)
#endif /* CONFIG_PREEMPT_* */
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
-extern void ___might_sleep(const char *file, int line, int preempt_offset);
-extern void __might_sleep(const char *file, int line, int preempt_offset);
+extern void __might_resched(const char *file, int line, unsigned int offsets);
+extern void __might_sleep(const char *file, int line);
extern void __cant_sleep(const char *file, int line, int preempt_offset);
extern void __cant_migrate(const char *file, int line);
@@ -129,7 +126,7 @@ extern void __cant_migrate(const char *file, int line);
* supposed to.
*/
# define might_sleep() \
- do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
+ do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0)
/**
* cant_sleep - annotation for functions that cannot sleep
*
@@ -168,10 +165,9 @@ extern void __cant_migrate(const char *file, int line);
*/
# define non_block_end() WARN_ON(current->non_block_count-- == 0)
#else
- static inline void ___might_sleep(const char *file, int line,
- int preempt_offset) { }
- static inline void __might_sleep(const char *file, int line,
- int preempt_offset) { }
+ static inline void __might_resched(const char *file, int line,
+ unsigned int offsets) { }
+static inline void __might_sleep(const char *file, int line) { }
# define might_sleep() do { might_resched(); } while (0)
# define cant_sleep() do { } while (0)
# define cant_migrate() do { } while (0)
@@ -229,8 +225,6 @@ extern bool parse_option_str(const char *str, const char *option);
extern char *next_arg(char *args, char **param, char **val);
extern int core_kernel_text(unsigned long addr);
-extern int init_kernel_text(unsigned long addr);
-extern int core_kernel_data(unsigned long addr);
extern int __kernel_text_address(unsigned long addr);
extern int kernel_text_address(unsigned long addr);
extern int func_ptr_is_kernel_text(void *ptr);
@@ -248,6 +242,7 @@ extern bool early_boot_irqs_disabled;
extern enum system_states {
SYSTEM_BOOTING,
SYSTEM_SCHEDULING,
+ SYSTEM_FREEING_INITMEM,
SYSTEM_RUNNING,
SYSTEM_HALT,
SYSTEM_POWER_OFF,
@@ -483,36 +478,6 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#define __CONCAT(a, b) a ## b
#define CONCATENATE(a, b) __CONCAT(a, b)
-/**
- * container_of - cast a member of a structure out to the containing structure
- * @ptr: the pointer to the member.
- * @type: the type of the container struct this is embedded in.
- * @member: the name of the member within the struct.
- *
- */
-#define container_of(ptr, type, member) ({ \
- void *__mptr = (void *)(ptr); \
- BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
- !__same_type(*(ptr), void), \
- "pointer type mismatch in container_of()"); \
- ((type *)(__mptr - offsetof(type, member))); })
-
-/**
- * container_of_safe - cast a member of a structure out to the containing structure
- * @ptr: the pointer to the member.
- * @type: the type of the container struct this is embedded in.
- * @member: the name of the member within the struct.
- *
- * If IS_ERR_OR_NULL(ptr), ptr is returned unchanged.
- */
-#define container_of_safe(ptr, type, member) ({ \
- void *__mptr = (void *)(ptr); \
- BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
- !__same_type(*(ptr), void), \
- "pointer type mismatch in container_of()"); \
- IS_ERR_OR_NULL(__mptr) ? ERR_CAST(__mptr) : \
- ((type *)(__mptr - offsetof(type, member))); })
-
/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 44ae1a7eb9e3..69ae6b278464 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -102,6 +102,7 @@ extern void account_system_index_time(struct task_struct *, u64,
enum cpu_usage_stat);
extern void account_steal_time(u64);
extern void account_idle_time(u64);
+extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
static inline void account_process_tick(struct task_struct *tsk, int user)
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 1093abf7c28c..3ccce6f24548 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -269,10 +269,6 @@ struct kernfs_ops {
struct poll_table_struct *pt);
int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma);
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lock_class_key lockdep_key;
-#endif
};
/*
@@ -568,30 +564,6 @@ kernfs_create_dir(struct kernfs_node *parent, const char *name, umode_t mode,
priv, NULL);
}
-static inline struct kernfs_node *
-kernfs_create_file_ns(struct kernfs_node *parent, const char *name,
- umode_t mode, kuid_t uid, kgid_t gid,
- loff_t size, const struct kernfs_ops *ops,
- void *priv, const void *ns)
-{
- struct lock_class_key *key = NULL;
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- key = (struct lock_class_key *)&ops->lockdep_key;
-#endif
- return __kernfs_create_file(parent, name, mode, uid, gid,
- size, ops, priv, ns, key);
-}
-
-static inline struct kernfs_node *
-kernfs_create_file(struct kernfs_node *parent, const char *name, umode_t mode,
- loff_t size, const struct kernfs_ops *ops, void *priv)
-{
- return kernfs_create_file_ns(parent, name, mode,
- GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
- size, ops, priv, NULL);
-}
-
static inline int kernfs_remove_by_name(struct kernfs_node *parent,
const char *name)
{
diff --git a/include/linux/keyslot-manager.h b/include/linux/keyslot-manager.h
deleted file mode 100644
index a27605e2f826..000000000000
--- a/include/linux/keyslot-manager.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright 2019 Google LLC
- */
-
-#ifndef __LINUX_KEYSLOT_MANAGER_H
-#define __LINUX_KEYSLOT_MANAGER_H
-
-#include <linux/bio.h>
-#include <linux/blk-crypto.h>
-
-struct blk_keyslot_manager;
-
-/**
- * struct blk_ksm_ll_ops - functions to manage keyslots in hardware
- * @keyslot_program: Program the specified key into the specified slot in the
- * inline encryption hardware.
- * @keyslot_evict: Evict key from the specified keyslot in the hardware.
- * The key is provided so that e.g. dm layers can evict
- * keys from the devices that they map over.
- * Returns 0 on success, -errno otherwise.
- *
- * This structure should be provided by storage device drivers when they set up
- * a keyslot manager - this structure holds the function ptrs that the keyslot
- * manager will use to manipulate keyslots in the hardware.
- */
-struct blk_ksm_ll_ops {
- int (*keyslot_program)(struct blk_keyslot_manager *ksm,
- const struct blk_crypto_key *key,
- unsigned int slot);
- int (*keyslot_evict)(struct blk_keyslot_manager *ksm,
- const struct blk_crypto_key *key,
- unsigned int slot);
-};
-
-struct blk_keyslot_manager {
- /*
- * The struct blk_ksm_ll_ops that this keyslot manager will use
- * to perform operations like programming and evicting keys on the
- * device
- */
- struct blk_ksm_ll_ops ksm_ll_ops;
-
- /*
- * The maximum number of bytes supported for specifying the data unit
- * number.
- */
- unsigned int max_dun_bytes_supported;
-
- /*
- * Array of size BLK_ENCRYPTION_MODE_MAX of bitmasks that represents
- * whether a crypto mode and data unit size are supported. The i'th
- * bit of crypto_mode_supported[crypto_mode] is set iff a data unit
- * size of (1 << i) is supported. We only support data unit sizes
- * that are powers of 2.
- */
- unsigned int crypto_modes_supported[BLK_ENCRYPTION_MODE_MAX];
-
- /* Device for runtime power management (NULL if none) */
- struct device *dev;
-
- /* Here onwards are *private* fields for internal keyslot manager use */
-
- unsigned int num_slots;
-
- /* Protects programming and evicting keys from the device */
- struct rw_semaphore lock;
-
- /* List of idle slots, with least recently used slot at front */
- wait_queue_head_t idle_slots_wait_queue;
- struct list_head idle_slots;
- spinlock_t idle_slots_lock;
-
- /*
- * Hash table which maps struct *blk_crypto_key to keyslots, so that we
- * can find a key's keyslot in O(1) time rather than O(num_slots).
- * Protected by 'lock'.
- */
- struct hlist_head *slot_hashtable;
- unsigned int log_slot_ht_size;
-
- /* Per-keyslot data */
- struct blk_ksm_keyslot *slots;
-};
-
-int blk_ksm_init(struct blk_keyslot_manager *ksm, unsigned int num_slots);
-
-int devm_blk_ksm_init(struct device *dev, struct blk_keyslot_manager *ksm,
- unsigned int num_slots);
-
-blk_status_t blk_ksm_get_slot_for_key(struct blk_keyslot_manager *ksm,
- const struct blk_crypto_key *key,
- struct blk_ksm_keyslot **slot_ptr);
-
-unsigned int blk_ksm_get_slot_idx(struct blk_ksm_keyslot *slot);
-
-void blk_ksm_put_slot(struct blk_ksm_keyslot *slot);
-
-bool blk_ksm_crypto_cfg_supported(struct blk_keyslot_manager *ksm,
- const struct blk_crypto_config *cfg);
-
-int blk_ksm_evict_key(struct blk_keyslot_manager *ksm,
- const struct blk_crypto_key *key);
-
-void blk_ksm_reprogram_all_keys(struct blk_keyslot_manager *ksm);
-
-void blk_ksm_destroy(struct blk_keyslot_manager *ksm);
-
-void blk_ksm_intersect_modes(struct blk_keyslot_manager *parent,
- const struct blk_keyslot_manager *child);
-
-void blk_ksm_init_passthrough(struct blk_keyslot_manager *ksm);
-
-bool blk_ksm_is_superset(struct blk_keyslot_manager *ksm_superset,
- struct blk_keyslot_manager *ksm_subset);
-
-void blk_ksm_update_capabilities(struct blk_keyslot_manager *target_ksm,
- struct blk_keyslot_manager *reference_ksm);
-
-#endif /* __LINUX_KEYSLOT_MANAGER_H */
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index 3fe6dd8a18c1..4b5e3679a72c 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -14,6 +14,9 @@
#ifdef CONFIG_KFENCE
+#include <linux/atomic.h>
+#include <linux/static_key.h>
+
/*
* We allocate an even number of pages, as it simplifies calculations to map
* address to metadata indices; effectively, the very first page serves as an
@@ -22,13 +25,8 @@
#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)
extern char *__kfence_pool;
-#ifdef CONFIG_KFENCE_STATIC_KEYS
-#include <linux/static_key.h>
DECLARE_STATIC_KEY_FALSE(kfence_allocation_key);
-#else
-#include <linux/atomic.h>
extern atomic_t kfence_allocation_gate;
-#endif
/**
* is_kfence_address() - check if an address belongs to KFENCE pool
@@ -116,13 +114,16 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags);
*/
static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
-#ifdef CONFIG_KFENCE_STATIC_KEYS
- if (static_branch_unlikely(&kfence_allocation_key))
+#if defined(CONFIG_KFENCE_STATIC_KEYS) || CONFIG_KFENCE_SAMPLE_INTERVAL == 0
+ if (!static_branch_unlikely(&kfence_allocation_key))
+ return NULL;
#else
- if (unlikely(!atomic_read(&kfence_allocation_gate)))
+ if (!static_branch_likely(&kfence_allocation_key))
+ return NULL;
#endif
- return __kfence_alloc(s, size, flags);
- return NULL;
+ if (likely(atomic_read(&kfence_allocation_gate)))
+ return NULL;
+ return __kfence_alloc(s, size, flags);
}
/**
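
After this change, kfence_alloc() is gated by a static branch in both configurations (its polarity depends on whether CONFIG_KFENCE_SAMPLE_INTERVAL defaults to 0), and the atomic kfence_allocation_gate is then consulted to pick the one sampled allocation. A sketch of how an allocator fast path consumes it; the surrounding function is illustrative:

    /* Illustrative slab-side call site; the regular fallback is omitted. */
    static void *example_slab_alloc(struct kmem_cache *s, size_t size,
                                    gfp_t flags)
    {
            void *obj = kfence_alloc(s, size, flags);

            if (unlikely(obj))      /* sampled: a KFENCE-guarded object */
                    return obj;

            /* ... fall through to the normal slab fast path ... */
            return NULL;
    }
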
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index ea30529fba08..efd56f990a46 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -101,7 +101,6 @@ int kobject_init_and_add(struct kobject *kobj,
extern void kobject_del(struct kobject *kobj);
-extern struct kobject * __must_check kobject_create(void);
extern struct kobject * __must_check kobject_create_and_add(const char *name,
struct kobject *parent);
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index e4f3bfe08757..e974caf39d3e 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -3,7 +3,6 @@
#define _LINUX_KPROBES_H
/*
* Kernel Probes (KProbes)
- * include/linux/kprobes.h
*
* Copyright (C) IBM Corporation, 2002, 2004
*
@@ -39,7 +38,7 @@
#define KPROBE_REENTER 0x00000004
#define KPROBE_HIT_SSDONE 0x00000008
-#else /* CONFIG_KPROBES */
+#else /* !CONFIG_KPROBES */
#include <asm-generic/kprobes.h>
typedef int kprobe_opcode_t;
struct arch_specific_insn {
@@ -105,25 +104,25 @@ struct kprobe {
#define KPROBE_FLAG_FTRACE 8 /* probe is using ftrace */
/* Has this kprobe gone ? */
-static inline int kprobe_gone(struct kprobe *p)
+static inline bool kprobe_gone(struct kprobe *p)
{
return p->flags & KPROBE_FLAG_GONE;
}
/* Is this kprobe disabled ? */
-static inline int kprobe_disabled(struct kprobe *p)
+static inline bool kprobe_disabled(struct kprobe *p)
{
return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE);
}
/* Is this kprobe really running optimized path ? */
-static inline int kprobe_optimized(struct kprobe *p)
+static inline bool kprobe_optimized(struct kprobe *p)
{
return p->flags & KPROBE_FLAG_OPTIMIZED;
}
/* Does this kprobe use ftrace? */
-static inline int kprobe_ftrace(struct kprobe *p)
+static inline bool kprobe_ftrace(struct kprobe *p)
{
return p->flags & KPROBE_FLAG_FTRACE;
}
@@ -181,14 +180,6 @@ struct kprobe_blacklist_entry {
DECLARE_PER_CPU(struct kprobe *, current_kprobe);
DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
-/*
- * For #ifdef avoidance:
- */
-static inline int kprobes_built_in(void)
-{
- return 1;
-}
-
extern void kprobe_busy_begin(void);
extern void kprobe_busy_end(void);
@@ -197,15 +188,26 @@ extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs);
extern int arch_trampoline_kprobe(struct kprobe *p);
+void arch_kretprobe_fixup_return(struct pt_regs *regs,
+ kprobe_opcode_t *correct_ret_addr);
+
+void __kretprobe_trampoline(void);
+/*
+ * Since some architectures use structured function pointers,
+ * use dereference_function_descriptor() to get the real function address.
+ */
+static nokprobe_inline void *kretprobe_trampoline_addr(void)
+{
+ return dereference_kernel_function_descriptor(__kretprobe_trampoline);
+}
+
/* If the trampoline handler called from a kprobe, use this version */
unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
- void *trampoline_address,
- void *frame_pointer);
+ void *frame_pointer);
static nokprobe_inline
unsigned long kretprobe_trampoline_handler(struct pt_regs *regs,
- void *trampoline_address,
- void *frame_pointer)
+ void *frame_pointer)
{
unsigned long ret;
/*
@@ -214,7 +216,7 @@ unsigned long kretprobe_trampoline_handler(struct pt_regs *regs,
* be running at this point.
*/
kprobe_busy_begin();
- ret = __kretprobe_trampoline_handler(regs, trampoline_address, frame_pointer);
+ ret = __kretprobe_trampoline_handler(regs, frame_pointer);
kprobe_busy_end();
return ret;
@@ -228,7 +230,7 @@ static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance
return READ_ONCE(ri->rph->rp);
}
-#else /* CONFIG_KRETPROBES */
+#else /* !CONFIG_KRETPROBES */
static inline void arch_prepare_kretprobe(struct kretprobe *rp,
struct pt_regs *regs)
{
@@ -239,11 +241,15 @@ static inline int arch_trampoline_kprobe(struct kprobe *p)
}
#endif /* CONFIG_KRETPROBES */
+/* Markers of '_kprobe_blacklist' section */
+extern unsigned long __start_kprobe_blacklist[];
+extern unsigned long __stop_kprobe_blacklist[];
+
extern struct kretprobe_blackpoint kretprobe_blacklist[];
#ifdef CONFIG_KPROBES_SANITY_TEST
extern int init_test_probes(void);
-#else
+#else /* !CONFIG_KPROBES_SANITY_TEST */
static inline int init_test_probes(void)
{
return 0;
@@ -303,7 +309,7 @@ static inline bool is_kprobe_##__name##_slot(unsigned long addr) \
#define KPROBE_OPTINSN_PAGE_SYM "kprobe_optinsn_page"
int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
unsigned long *value, char *type, char *sym);
-#else /* __ARCH_WANT_KPROBES_INSN_SLOT */
+#else /* !__ARCH_WANT_KPROBES_INSN_SLOT */
#define DEFINE_INSN_CACHE_OPS(__name) \
static inline bool is_kprobe_##__name##_slot(unsigned long addr) \
{ \
@@ -334,7 +340,7 @@ extern void arch_unoptimize_kprobes(struct list_head *oplist,
struct list_head *done_list);
extern void arch_unoptimize_kprobe(struct optimized_kprobe *op);
extern int arch_within_optimized_kprobe(struct optimized_kprobe *op,
- unsigned long addr);
+ kprobe_opcode_t *addr);
extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
@@ -345,18 +351,22 @@ extern int sysctl_kprobes_optimization;
extern int proc_kprobes_optimization_handler(struct ctl_table *table,
int write, void *buffer,
size_t *length, loff_t *ppos);
-#endif
+#endif /* CONFIG_SYSCTL */
extern void wait_for_kprobe_optimizer(void);
-#else
+#else /* !CONFIG_OPTPROBES */
static inline void wait_for_kprobe_optimizer(void) { }
#endif /* CONFIG_OPTPROBES */
+
#ifdef CONFIG_KPROBES_ON_FTRACE
extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct ftrace_regs *fregs);
extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
-#endif
-
-int arch_check_ftrace_location(struct kprobe *p);
+#else
+static inline int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_KPROBES_ON_FTRACE */
/* Get the kprobe at this addr (if any) - called with preemption disabled */
struct kprobe *get_kprobe(void *addr);
@@ -364,7 +374,7 @@ struct kprobe *get_kprobe(void *addr);
/* kprobe_running() will just return the current_kprobe on this CPU */
static inline struct kprobe *kprobe_running(void)
{
- return (__this_cpu_read(current_kprobe));
+ return __this_cpu_read(current_kprobe);
}
static inline void reset_current_kprobe(void)
@@ -382,7 +392,6 @@ int register_kprobe(struct kprobe *p);
void unregister_kprobe(struct kprobe *p);
int register_kprobes(struct kprobe **kps, int num);
void unregister_kprobes(struct kprobe **kps, int num);
-unsigned long arch_deref_entry_point(void *);
int register_kretprobe(struct kretprobe *rp);
void unregister_kretprobe(struct kretprobe *rp);
@@ -410,10 +419,6 @@ int arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
char *type, char *sym);
#else /* !CONFIG_KPROBES: */
-static inline int kprobes_built_in(void)
-{
- return 0;
-}
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
return 0;
@@ -428,11 +433,11 @@ static inline struct kprobe *kprobe_running(void)
}
static inline int register_kprobe(struct kprobe *p)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline int register_kprobes(struct kprobe **kps, int num)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline void unregister_kprobe(struct kprobe *p)
{
@@ -442,11 +447,11 @@ static inline void unregister_kprobes(struct kprobe **kps, int num)
}
static inline int register_kretprobe(struct kretprobe *rp)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline int register_kretprobes(struct kretprobe **rps, int num)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline void unregister_kretprobe(struct kretprobe *rp)
{
@@ -462,11 +467,11 @@ static inline void kprobe_free_init_mem(void)
}
static inline int disable_kprobe(struct kprobe *kp)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline int enable_kprobe(struct kprobe *kp)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline bool within_kprobe_blacklist(unsigned long addr)
@@ -479,6 +484,7 @@ static inline int kprobe_get_kallsym(unsigned int symnum, unsigned long *value,
return -ERANGE;
}
#endif /* CONFIG_KPROBES */
+
static inline int disable_kretprobe(struct kretprobe *rp)
{
return disable_kprobe(&rp->kp);
@@ -493,19 +499,42 @@ static inline bool is_kprobe_insn_slot(unsigned long addr)
{
return false;
}
-#endif
+#endif /* !CONFIG_KPROBES */
+
#ifndef CONFIG_OPTPROBES
static inline bool is_kprobe_optinsn_slot(unsigned long addr)
{
return false;
}
+#endif /* !CONFIG_OPTPROBES */
+
+#ifdef CONFIG_KRETPROBES
+static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr)
+{
+ return (void *)addr == kretprobe_trampoline_addr();
+}
+
+unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
+ struct llist_node **cur);
+#else
+static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr)
+{
+ return false;
+}
+
+static nokprobe_inline
+unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
+ struct llist_node **cur)
+{
+ return 0;
+}
#endif
/* Returns true if kprobes handled the fault */
static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs,
unsigned int trap)
{
- if (!kprobes_built_in())
+ if (!IS_ENABLED(CONFIG_KPROBES))
return false;
if (user_mode(regs))
return false;
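
With the stubs above now returning -EOPNOTSUPP and kprobe_page_fault() keying off IS_ENABLED(CONFIG_KPROBES), callers compile unchanged either way. A minimal registration sketch; the probe target and module boilerplate are illustrative:

    #include <linux/kprobes.h>
    #include <linux/module.h>

    static struct kprobe example_kp = {
            .symbol_name = "kernel_clone",  /* hypothetical target */
    };

    static int __init example_init(void)
    {
            /* on !CONFIG_KPROBES kernels this now fails with -EOPNOTSUPP */
            return register_kprobe(&example_kp);
    }

    static void __exit example_exit(void)
    {
            unregister_kprobe(&example_kp);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");
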
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 161e8164abcf..a38a5bca1ba5 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -52,7 +52,7 @@ struct page *ksm_might_need_to_copy(struct page *page,
struct vm_area_struct *vma, unsigned long address);
void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
-void ksm_migrate_page(struct page *newpage, struct page *oldpage);
+void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
#else /* !CONFIG_KSM */
@@ -83,7 +83,7 @@ static inline void rmap_walk_ksm(struct page *page,
{
}
-static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
+static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
{
}
#endif /* CONFIG_MMU */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0f18df7fe874..9e0667e3723e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -39,8 +39,8 @@
#include <asm/kvm_host.h>
#include <linux/kvm_dirty_ring.h>
-#ifndef KVM_MAX_VCPU_ID
-#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
+#ifndef KVM_MAX_VCPU_IDS
+#define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS
#endif
/*
@@ -150,7 +150,7 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_UNBLOCK 2
#define KVM_REQ_UNHALT 3
-#define KVM_REQ_VM_BUGGED (4 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_VM_DEAD (4 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQUEST_ARCH_BASE 8
#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
@@ -160,8 +160,7 @@ static inline bool is_error_page(struct page *page)
#define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0)
bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
- struct kvm_vcpu *except,
- unsigned long *vcpu_bitmap, cpumask_var_t tmp);
+ unsigned long *vcpu_bitmap);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
struct kvm_vcpu *except);
@@ -618,6 +617,7 @@ struct kvm {
unsigned int max_halt_poll_ns;
u32 dirty_ring_size;
bool vm_bugged;
+ bool vm_dead;
#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
struct notifier_block pm_notifier;
@@ -651,12 +651,19 @@ struct kvm {
#define vcpu_err(vcpu, fmt, ...) \
kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
+static inline void kvm_vm_dead(struct kvm *kvm)
+{
+ kvm->vm_dead = true;
+ kvm_make_all_cpus_request(kvm, KVM_REQ_VM_DEAD);
+}
+
static inline void kvm_vm_bugged(struct kvm *kvm)
{
kvm->vm_bugged = true;
- kvm_make_all_cpus_request(kvm, KVM_REQ_VM_BUGGED);
+ kvm_vm_dead(kvm);
}
+
#define KVM_BUG(cond, kvm, fmt...) \
({ \
int __ret = (cond); \
@@ -1082,10 +1089,17 @@ static inline struct kvm *kvm_arch_alloc_vm(void)
{
return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}
+#endif
+
+static inline void __kvm_arch_free_vm(struct kvm *kvm)
+{
+ kvfree(kvm);
+}
+#ifndef __KVM_HAVE_ARCH_VM_FREE
static inline void kvm_arch_free_vm(struct kvm *kvm)
{
- kfree(kvm);
+ __kvm_arch_free_vm(kvm);
}
#endif
@@ -1765,6 +1779,8 @@ void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set);
+bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *,
+ struct kvm_kernel_irq_routing_entry *);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
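
Two related changes above: kvm_vm_bugged() becomes a special case of the new kvm_vm_dead(), and the default kvm_arch_free_vm() now releases with kvfree() via __kvm_arch_free_vm(), so struct kvm may also come from vmalloc. A sketch of the override an architecture could provide by defining __KVM_HAVE_ARCH_VM_FREE; the teardown body is hypothetical:

    /* Hypothetical arch override, enabled by __KVM_HAVE_ARCH_VM_FREE. */
    static inline void kvm_arch_free_vm(struct kvm *kvm)
    {
            /* arch-specific teardown would go here ... */
            __kvm_arch_free_vm(kvm);        /* common kvfree()-based release */
    }
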
diff --git a/include/linux/leds.h b/include/linux/leds.h
index a0b730be40ad..ba4861ec73d3 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -360,7 +360,7 @@ struct led_trigger {
struct led_hw_trigger_type *trigger_type;
/* LEDs under control by this trigger (for simple triggers) */
- rwlock_t leddev_list_lock;
+ spinlock_t leddev_list_lock;
struct list_head led_cdevs;
/* Link to next registered trigger */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index c0c64f03e107..2a8404b26083 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -394,7 +394,7 @@ enum {
/* This should match the actual table size of
* ata_eh_cmd_timeout_table in libata-eh.c.
*/
- ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 6,
+ ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 7,
/* Horkage types. May be set by libata or controller on drives
(some horkage may be drive/controller pair dependent) */
@@ -427,6 +427,7 @@ enum {
ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */
ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */
ATA_HORKAGE_NO_NCQ_ON_ATI = (1 << 27), /* Disable NCQ on ATI chipset */
+ ATA_HORKAGE_NO_ID_DEV_LOG = (1 << 28), /* Identify device log missing */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
@@ -676,6 +677,18 @@ struct ata_ering {
struct ata_ering_entry ring[ATA_ERING_SIZE];
};
+struct ata_cpr {
+ u8 num;
+ u8 num_storage_elements;
+ u64 start_lba;
+ u64 num_lbas;
+};
+
+struct ata_cpr_log {
+ u8 nr_cpr;
+ struct ata_cpr cpr[];
+};
+
struct ata_device {
struct ata_link *link;
unsigned int devno; /* 0 or 1 */
@@ -735,6 +748,9 @@ struct ata_device {
u32 zac_zones_optimal_nonseq;
u32 zac_zones_max_open;
+ /* Concurrent positioning ranges */
+ struct ata_cpr_log *cpr_log;
+
/* error history */
int spdn_cnt;
/* ering is CLEAR_END, read comment above CLEAR_END */
@@ -1388,7 +1404,7 @@ extern int ata_link_nr_enabled(struct ata_link *link);
*/
extern const struct ata_port_operations ata_base_port_ops;
extern const struct ata_port_operations sata_port_ops;
-extern struct device_attribute *ata_common_sdev_attrs[];
+extern const struct attribute_group *ata_common_sdev_groups[];
/*
* All sht initializers (BASE, PIO, BMDMA, NCQ) must be instantiated
@@ -1418,14 +1434,14 @@ extern struct device_attribute *ata_common_sdev_attrs[];
#define ATA_BASE_SHT(drv_name) \
ATA_SUBBASE_SHT(drv_name), \
- .sdev_attrs = ata_common_sdev_attrs
+ .sdev_groups = ata_common_sdev_groups
#ifdef CONFIG_SATA_HOST
-extern struct device_attribute *ata_ncq_sdev_attrs[];
+extern const struct attribute_group *ata_ncq_sdev_groups[];
#define ATA_NCQ_SHT(drv_name) \
ATA_SUBBASE_SHT(drv_name), \
- .sdev_attrs = ata_ncq_sdev_attrs, \
+ .sdev_groups = ata_ncq_sdev_groups, \
.change_queue_depth = ata_scsi_change_queue_depth
#endif
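
The sdev_attrs-to-sdev_groups switch above follows the SCSI midlayer's move from bare struct device_attribute arrays to const struct attribute_group arrays. Drivers pick this up through the updated template macros; the driver name below is made up:

    #include <scsi/scsi_host.h>
    #include <linux/libata.h>

    /* Illustrative host template; ATA_NCQ_SHT now wires up .sdev_groups. */
    static struct scsi_host_template example_sht = {
            ATA_NCQ_SHT("example"),
    };
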
diff --git a/include/linux/list.h b/include/linux/list.h
index f2af4b4aa4e9..6636fc07f918 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -2,11 +2,13 @@
#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H
+#include <linux/container_of.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/poison.h>
#include <linux/const.h>
-#include <linux/kernel.h>
+
+#include <asm/barrier.h>
/*
* Circular doubly linked list implementation.
diff --git a/include/linux/llist.h b/include/linux/llist.h
index 24f207b0190b..85bda2d02d65 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -49,7 +49,9 @@
*/
#include <linux/atomic.h>
-#include <linux/kernel.h>
+#include <linux/container_of.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
struct llist_head {
struct llist_node *first;
diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h
index a98309c0121c..398f70093cd3 100644
--- a/include/linux/lockd/xdr.h
+++ b/include/linux/lockd/xdr.h
@@ -96,18 +96,19 @@ struct nlm_reboot {
*/
#define NLMSVC_XDRSIZE sizeof(struct nlm_args)
-int nlmsvc_decode_testargs(struct svc_rqst *, __be32 *);
-int nlmsvc_encode_testres(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_lockargs(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_cancargs(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_unlockargs(struct svc_rqst *, __be32 *);
-int nlmsvc_encode_res(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_res(struct svc_rqst *, __be32 *);
-int nlmsvc_encode_void(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_void(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_shareargs(struct svc_rqst *, __be32 *);
-int nlmsvc_encode_shareres(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_notify(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_reboot(struct svc_rqst *, __be32 *);
+bool nlmsvc_decode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_testargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_lockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_cancargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_unlockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_reboot(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_shareargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_notify(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+
+bool nlmsvc_encode_testres(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_encode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_encode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_encode_shareres(struct svc_rqst *rqstp, struct xdr_stream *xdr);
#endif /* LOCKD_XDR_H */
diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h
index 5ae766f26e04..9a6b55da8fd6 100644
--- a/include/linux/lockd/xdr4.h
+++ b/include/linux/lockd/xdr4.h
@@ -22,21 +22,20 @@
#define nlm4_fbig cpu_to_be32(NLM_FBIG)
#define nlm4_failed cpu_to_be32(NLM_FAILED)
-
-
-int nlm4svc_decode_testargs(struct svc_rqst *, __be32 *);
-int nlm4svc_encode_testres(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_lockargs(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_cancargs(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_unlockargs(struct svc_rqst *, __be32 *);
-int nlm4svc_encode_res(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_res(struct svc_rqst *, __be32 *);
-int nlm4svc_encode_void(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_void(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_shareargs(struct svc_rqst *, __be32 *);
-int nlm4svc_encode_shareres(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_notify(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_reboot(struct svc_rqst *, __be32 *);
+bool nlm4svc_decode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_testargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_lockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_cancargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_unlockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_reboot(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_shareargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_notify(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+
+bool nlm4svc_encode_testres(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_encode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_encode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_encode_shareres(struct svc_rqst *rqstp, struct xdr_stream *xdr);
extern const struct rpc_version nlm_version4;
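
Both lockd headers above move the server-side XDR routines from the old (struct svc_rqst *, __be32 *) signature to bool functions that parse a struct xdr_stream. A sketch of the new shape; the helpers below are hypothetical, assuming the xdr_stream_decode_u32() accessor from <linux/sunrpc/xdr.h>:

    /* The trivial case: void arguments consume nothing from the stream. */
    bool example_decode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr)
    {
            return true;
    }

    /* Hypothetical field decoder in the new style. */
    static bool example_decode_len(struct xdr_stream *xdr, u32 *len)
    {
            /* stream accessors return a negative value on a short buffer */
            if (xdr_stream_decode_u32(xdr, len) < 0)
                    return false;
            return true;
    }
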
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 9fe165beb0f9..467b94257105 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -481,23 +481,6 @@ do { \
#endif /* CONFIG_LOCK_STAT */
-#ifdef CONFIG_LOCKDEP
-
-/*
- * On lockdep we dont want the hand-coded irq-enable of
- * _raw_*_lock_flags() code, because lockdep assumes
- * that interrupts are not re-enabled during lock-acquire:
- */
-#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
- LOCK_CONTENDED((_lock), (try), (lock))
-
-#else /* CONFIG_LOCKDEP */
-
-#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
- lockfl((_lock), (flags))
-
-#endif /* CONFIG_LOCKDEP */
-
#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
index 3e726ace5c62..d22430840b53 100644
--- a/include/linux/lockdep_types.h
+++ b/include/linux/lockdep_types.h
@@ -21,7 +21,7 @@ enum lockdep_wait_type {
LD_WAIT_SPIN, /* spin loops, raw_spinlock_t etc.. */
#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
- LD_WAIT_CONFIG, /* CONFIG_PREEMPT_LOCK, spinlock_t etc.. */
+ LD_WAIT_CONFIG, /* preemptible in PREEMPT_RT, spinlock_t etc.. */
#else
LD_WAIT_CONFIG = LD_WAIT_SPIN,
#endif
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index 2adeea44c0d5..df8de62f4710 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -26,13 +26,13 @@
* #undef LSM_HOOK
* };
*/
-LSM_HOOK(int, 0, binder_set_context_mgr, struct task_struct *mgr)
-LSM_HOOK(int, 0, binder_transaction, struct task_struct *from,
- struct task_struct *to)
-LSM_HOOK(int, 0, binder_transfer_binder, struct task_struct *from,
- struct task_struct *to)
-LSM_HOOK(int, 0, binder_transfer_file, struct task_struct *from,
- struct task_struct *to, struct file *file)
+LSM_HOOK(int, 0, binder_set_context_mgr, const struct cred *mgr)
+LSM_HOOK(int, 0, binder_transaction, const struct cred *from,
+ const struct cred *to)
+LSM_HOOK(int, 0, binder_transfer_binder, const struct cred *from,
+ const struct cred *to)
+LSM_HOOK(int, 0, binder_transfer_file, const struct cred *from,
+ const struct cred *to, struct file *file)
LSM_HOOK(int, 0, ptrace_access_check, struct task_struct *child,
unsigned int mode)
LSM_HOOK(int, 0, ptrace_traceme, struct task_struct *parent)
@@ -83,7 +83,8 @@ LSM_HOOK(int, 0, sb_add_mnt_opt, const char *option, const char *val,
LSM_HOOK(int, 0, move_mount, const struct path *from_path,
const struct path *to_path)
LSM_HOOK(int, 0, dentry_init_security, struct dentry *dentry,
- int mode, const struct qstr *name, void **ctx, u32 *ctxlen)
+ int mode, const struct qstr *name, const char **xattr_name,
+ void **ctx, u32 *ctxlen)
LSM_HOOK(int, 0, dentry_create_files_as, struct dentry *dentry, int mode,
struct qstr *name, const struct cred *old, struct cred *new)
@@ -328,11 +329,11 @@ LSM_HOOK(int, 0, tun_dev_create, void)
LSM_HOOK(int, 0, tun_dev_attach_queue, void *security)
LSM_HOOK(int, 0, tun_dev_attach, struct sock *sk, void *security)
LSM_HOOK(int, 0, tun_dev_open, void *security)
-LSM_HOOK(int, 0, sctp_assoc_request, struct sctp_endpoint *ep,
+LSM_HOOK(int, 0, sctp_assoc_request, struct sctp_association *asoc,
struct sk_buff *skb)
LSM_HOOK(int, 0, sctp_bind_connect, struct sock *sk, int optname,
struct sockaddr *address, int addrlen)
-LSM_HOOK(void, LSM_RET_VOID, sctp_sk_clone, struct sctp_endpoint *ep,
+LSM_HOOK(void, LSM_RET_VOID, sctp_sk_clone, struct sctp_association *asoc,
struct sock *sk, struct sock *newsk)
#endif /* CONFIG_SECURITY_NETWORK */
@@ -402,3 +403,8 @@ LSM_HOOK(void, LSM_RET_VOID, perf_event_free, struct perf_event *event)
LSM_HOOK(int, 0, perf_event_read, struct perf_event *event)
LSM_HOOK(int, 0, perf_event_write, struct perf_event *event)
#endif /* CONFIG_PERF_EVENTS */
+
+#ifdef CONFIG_IO_URING
+LSM_HOOK(int, 0, uring_override_creds, const struct cred *new)
+LSM_HOOK(int, 0, uring_sqpoll, void)
+#endif /* CONFIG_IO_URING */
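
The two new hooks above give LSMs a say before io_uring overrides credentials for a request and before an IORING_SETUP_SQPOLL thread is spawned. A hypothetical LSM would wire them up like any other hook; everything named example_* below is made up:

    static int example_uring_override_creds(const struct cred *new)
    {
            return 0;       /* allow; a real LSM would consult its policy */
    }

    static int example_uring_sqpoll(void)
    {
            return 0;       /* allow creating the SQPOLL kernel thread */
    }

    static struct security_hook_list example_hooks[] = {
            LSM_HOOK_INIT(uring_override_creds, example_uring_override_creds),
            LSM_HOOK_INIT(uring_sqpoll, example_uring_sqpoll),
    };      /* registered with security_add_hooks() at init time */
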
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 5c4c5c0602cb..d45b6f6e27fd 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -196,6 +196,9 @@
* @dentry dentry to use in calculating the context.
* @mode mode used to determine resource type.
* @name name of the last path component used to create file
+ * @xattr_name pointer to place the pointer to the security xattr name.
+ * The caller does not have to free the resulting pointer; it
+ * points to a static string.
* @ctx pointer to place the pointer to the resulting context in.
* @ctxlen point to place the length of the resulting context.
* @dentry_create_files_as:
@@ -1024,9 +1027,9 @@
* Security hooks for SCTP
*
* @sctp_assoc_request:
- * Passes the @ep and @chunk->skb of the association INIT packet to
+ * Passes the @asoc and @chunk->skb of the association INIT packet to
* the security module.
- * @ep pointer to sctp endpoint structure.
+ * @asoc pointer to sctp association structure.
* @skb pointer to skbuff of association packet.
* Return 0 on success, error on failure.
* @sctp_bind_connect:
@@ -1044,9 +1047,9 @@
* Called whenever a new socket is created by accept(2) (i.e. a TCP
* style socket) or when a socket is 'peeled off', e.g. when userspace
* calls sctp_peeloff(3).
- * @ep pointer to current sctp endpoint structure.
+ * @asoc pointer to current sctp association structure.
* @sk pointer to current sock structure.
- * @sk pointer to new sock structure.
+ * @newsk pointer to new sock structure.
*
* Security hooks for Infiniband
*
@@ -1313,22 +1316,22 @@
*
* @binder_set_context_mgr:
* Check whether @mgr is allowed to be the binder context manager.
- * @mgr contains the task_struct for the task being registered.
+ * @mgr contains the struct cred for the current binder process.
* Return 0 if permission is granted.
* @binder_transaction:
* Check whether @from is allowed to invoke a binder transaction call
* to @to.
- * @from contains the task_struct for the sending task.
- * @to contains the task_struct for the receiving task.
+ * @from contains the struct cred for the sending process.
+ * @to contains the struct cred for the receiving process.
* @binder_transfer_binder:
* Check whether @from is allowed to transfer a binder reference to @to.
- * @from contains the task_struct for the sending task.
- * @to contains the task_struct for the receiving task.
+ * @from contains the struct cred for the sending process.
+ * @to contains the struct cred for the receiving process.
* @binder_transfer_file:
* Check whether @from is allowed to transfer @file to @to.
- * @from contains the task_struct for the sending task.
+ * @from contains the struct cred for the sending process.
* @file contains the struct file being transferred.
- * @to contains the task_struct for the receiving task.
+ * @to contains the struct cred for the receiving process.
*
* @ptrace_access_check:
* Check permission before allowing the current process to trace the
@@ -1557,6 +1560,19 @@
* Read perf_event security info if allowed.
* @perf_event_write:
* Write perf_event security info if allowed.
+ *
+ * Security hooks for io_uring
+ *
+ * @uring_override_creds:
+ * Check if the current task, executing an io_uring operation, is allowed
+ * to override its credentials with @new.
+ *
+ * @new: the new creds to use
+ *
+ * @uring_sqpoll:
+ * Check whether the current task is allowed to spawn an io_uring polling
+ * thread (IORING_SETUP_SQPOLL).
+ *
*/
union security_list_options {
#define LSM_HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__);
diff --git a/include/linux/mdev.h b/include/linux/mdev.h
index 68427e8fadeb..15d03f6532d0 100644
--- a/include/linux/mdev.h
+++ b/include/linux/mdev.h
@@ -18,7 +18,6 @@ struct mdev_device {
void *driver_data;
struct list_head next;
struct mdev_type *type;
- struct device *iommu_device;
bool active;
};
@@ -27,25 +26,6 @@ static inline struct mdev_device *to_mdev_device(struct device *dev)
return container_of(dev, struct mdev_device, dev);
}
-/*
- * Called by the parent device driver to set the device which represents
- * this mdev in iommu protection scope. By default, the iommu device is
- * NULL, that indicates using vendor defined isolation.
- *
- * @dev: the mediated device that iommu will isolate.
- * @iommu_device: a pci device which represents the iommu for @dev.
- */
-static inline void mdev_set_iommu_device(struct mdev_device *mdev,
- struct device *iommu_device)
-{
- mdev->iommu_device = iommu_device;
-}
-
-static inline struct device *mdev_get_iommu_device(struct mdev_device *mdev)
-{
- return mdev->iommu_device;
-}
-
unsigned int mdev_get_type_group_id(struct mdev_device *mdev);
unsigned int mtype_get_type_group_id(struct mdev_type *mtype);
struct device *mtype_get_parent_dev(struct mdev_type *mtype);
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index 5e6dc38f418e..9f3587a61e14 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -349,6 +349,32 @@ int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val);
int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask,
u16 set);
+int mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
+ u16 mask, u16 set);
+
+static inline int mdiodev_read(struct mdio_device *mdiodev, u32 regnum)
+{
+ return mdiobus_read(mdiodev->bus, mdiodev->addr, regnum);
+}
+
+static inline int mdiodev_write(struct mdio_device *mdiodev, u32 regnum,
+ u16 val)
+{
+ return mdiobus_write(mdiodev->bus, mdiodev->addr, regnum, val);
+}
+
+static inline int mdiodev_modify(struct mdio_device *mdiodev, u32 regnum,
+ u16 mask, u16 set)
+{
+ return mdiobus_modify(mdiodev->bus, mdiodev->addr, regnum, mask, set);
+}
+
+static inline int mdiodev_modify_changed(struct mdio_device *mdiodev,
+ u32 regnum, u16 mask, u16 set)
+{
+ return mdiobus_modify_changed(mdiodev->bus, mdiodev->addr, regnum,
+ mask, set);
+}
static inline u32 mdiobus_c45_addr(int devad, u16 regnum)
{
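
The mdiodev_* wrappers added above let drivers that already hold a struct mdio_device drop the repetitive bus/address plumbing. A sketch; the function is illustrative, using standard MII register constants:

    #include <linux/mdio.h>
    #include <linux/mii.h>

    static int example_power_up(struct mdio_device *mdiodev)
    {
            /* was: mdiobus_modify(mdiodev->bus, mdiodev->addr, ...) */
            /* writes BMCR only if clearing BMCR_PDOWN changes the value */
            return mdiodev_modify_changed(mdiodev, MII_BMCR, BMCR_PDOWN, 0);
    }
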
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
index 5c4a18a91f89..ae4526389261 100644
--- a/include/linux/mem_encrypt.h
+++ b/include/linux/mem_encrypt.h
@@ -16,10 +16,6 @@
#include <asm/mem_encrypt.h>
-#else /* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
-
-static inline bool mem_encrypt_active(void) { return false; }
-
#endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */
#ifdef CONFIG_AMD_MEM_ENCRYPT
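
The !CONFIG_ARCH_HAS_MEM_ENCRYPT stub of mem_encrypt_active() is gone; callers appear to have been converted to the confidential-computing platform API instead. A sketch of the replacement pattern, assuming cc_platform_has() from <linux/cc_platform.h>:

    #include <linux/cc_platform.h>

    static bool example_need_bounce_buffers(void)
    {
            /* previously: return mem_encrypt_active(); */
            return cc_platform_has(CC_ATTR_MEM_ENCRYPT);
    }
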
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 34de69b3b8ba..8adcf1fa8096 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -28,17 +28,26 @@ extern unsigned long long max_possible_pfn;
/**
* enum memblock_flags - definition of memory region attributes
* @MEMBLOCK_NONE: no special request
- * @MEMBLOCK_HOTPLUG: hotpluggable region
+ * @MEMBLOCK_HOTPLUG: memory region indicated in the firmware-provided memory
+ * map during early boot as hot(un)pluggable system RAM (e.g., memory range
+ * that might get hotunplugged later). With "movable_node" set on the kernel
+ * commandline, try keeping this memory region hotunpluggable. Does not apply
+ * to memblocks added ("hotplugged") after early boot.
* @MEMBLOCK_MIRROR: mirrored region
* @MEMBLOCK_NOMAP: don't add to kernel direct mapping and treat as
* reserved in the memory map; refer to memblock_mark_nomap() description
* for further details
+ * @MEMBLOCK_DRIVER_MANAGED: memory region that is always detected and added
+ * via a driver, and never indicated in the firmware-provided memory map as
+ * system RAM. This corresponds to IORESOURCE_SYSRAM_DRIVER_MANAGED in the
+ * kernel resource tree.
*/
enum memblock_flags {
MEMBLOCK_NONE = 0x0, /* No special request */
MEMBLOCK_HOTPLUG = 0x1, /* hotpluggable region */
MEMBLOCK_MIRROR = 0x2, /* mirrored region */
MEMBLOCK_NOMAP = 0x4, /* don't add to kernel direct mapping */
+ MEMBLOCK_DRIVER_MANAGED = 0x8, /* always detected via a driver */
};
/**
@@ -100,10 +109,11 @@ static inline void memblock_discard(void) {}
#endif
void memblock_allow_resize(void);
-int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
+int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid,
+ enum memblock_flags flags);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
-int memblock_free(phys_addr_t base, phys_addr_t size);
+int memblock_phys_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
@@ -118,7 +128,7 @@ int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
void memblock_free_all(void);
-void memblock_free_ptr(void *ptr, size_t size);
+void memblock_free(void *ptr, size_t size);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);
@@ -133,7 +143,7 @@ void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
struct memblock_type *type_b, phys_addr_t *out_start,
phys_addr_t *out_end, int *out_nid);
-void __memblock_free_late(phys_addr_t base, phys_addr_t size);
+void memblock_free_late(phys_addr_t base, phys_addr_t size);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
@@ -208,7 +218,8 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
*/
#define for_each_mem_range(i, p_start, p_end) \
__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, \
- MEMBLOCK_HOTPLUG, p_start, p_end, NULL)
+ MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED, \
+ p_start, p_end, NULL)
/**
* for_each_mem_range_rev - reverse iterate through memblock areas from
@@ -219,7 +230,8 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
*/
#define for_each_mem_range_rev(i, p_start, p_end) \
__for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
- MEMBLOCK_HOTPLUG, p_start, p_end, NULL)
+ MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED,\
+ p_start, p_end, NULL)
/**
* for_each_reserved_mem_range - iterate over all reserved memblock areas
@@ -249,6 +261,11 @@ static inline bool memblock_is_nomap(struct memblock_region *m)
return m->flags & MEMBLOCK_NOMAP;
}
+static inline bool memblock_is_driver_managed(struct memblock_region *m)
+{
+ return m->flags & MEMBLOCK_DRIVER_MANAGED;
+}
+
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
@@ -372,7 +389,7 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE 0
-#define MEMBLOCK_ALLOC_KASAN 1
+#define MEMBLOCK_ALLOC_NOLEAKTRACE 1
/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0
@@ -441,23 +458,6 @@ static inline void *memblock_alloc_node(phys_addr_t size,
MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}
-static inline void memblock_free_early(phys_addr_t base,
- phys_addr_t size)
-{
- memblock_free(base, size);
-}
-
-static inline void memblock_free_early_nid(phys_addr_t base,
- phys_addr_t size, int nid)
-{
- memblock_free(base, size);
-}
-
-static inline void memblock_free_late(phys_addr_t base, phys_addr_t size)
-{
- __memblock_free_late(base, size);
-}
-
/*
* Set the allocation direction to bottom-up or top-down.
*/
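
The renames above make the naming consistent: memblock_free() now takes a virtual pointer (pairing with memblock_alloc()), physical ranges go through memblock_phys_free(), and __memblock_free_late() loses its underscores. A sketch of the resulting pairing; the function is illustrative and assumes <linux/sizes.h> for SZ_4K:

    static void __init example_memblock_usage(void)
    {
            void *buf;
            phys_addr_t pa;

            /* virtual allocation pairs with the renamed memblock_free() */
            buf = memblock_alloc(SZ_4K, SZ_4K);
            if (buf)
                    memblock_free(buf, SZ_4K);

            /* physical ranges use memblock_phys_free() */
            pa = memblock_phys_alloc(SZ_4K, SZ_4K);
            if (pa)
                    memblock_phys_free(pa, SZ_4K);
    }
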
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 3096c9a0ee01..0c5c403f4be6 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -180,12 +180,6 @@ struct mem_cgroup_thresholds {
struct mem_cgroup_threshold_ary *spare;
};
-enum memcg_kmem_state {
- KMEM_NONE,
- KMEM_ALLOCATED,
- KMEM_ONLINE,
-};
-
#if defined(CONFIG_SMP)
struct memcg_padding {
char x[0];
@@ -318,7 +312,6 @@ struct mem_cgroup {
#ifdef CONFIG_MEMCG_KMEM
int kmemcg_id;
- enum memcg_kmem_state kmem_state;
struct obj_cgroup __rcu *objcg;
struct list_head objcg_list; /* list of inherited objcgs */
#endif
@@ -369,7 +362,7 @@ enum page_memcg_data_flags {
#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)
-static inline bool PageMemcgKmem(struct page *page);
+static inline bool folio_memcg_kmem(struct folio *folio);
/*
* After the initialization objcg->memcg is always pointing at
@@ -384,89 +377,95 @@ static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
}
/*
- * __page_memcg - get the memory cgroup associated with a non-kmem page
- * @page: a pointer to the page struct
+ * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
+ * @folio: Pointer to the folio.
*
- * Returns a pointer to the memory cgroup associated with the page,
- * or NULL. This function assumes that the page is known to have a
+ * Returns a pointer to the memory cgroup associated with the folio,
+ * or NULL. This function assumes that the folio is known to have a
* proper memory cgroup pointer. It's not safe to call this function
- * against some type of pages, e.g. slab pages or ex-slab pages or
- * kmem pages.
+ * against some type of folios, e.g. slab folios or ex-slab folios or
+ * kmem folios.
*/
-static inline struct mem_cgroup *__page_memcg(struct page *page)
+static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
{
- unsigned long memcg_data = page->memcg_data;
+ unsigned long memcg_data = folio->memcg_data;
- VM_BUG_ON_PAGE(PageSlab(page), page);
- VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_OBJCGS, page);
- VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);
+ VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
+ VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
+ VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);
return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}
/*
- * __page_objcg - get the object cgroup associated with a kmem page
- * @page: a pointer to the page struct
+ * __folio_objcg - get the object cgroup associated with a kmem folio.
+ * @folio: Pointer to the folio.
*
- * Returns a pointer to the object cgroup associated with the page,
- * or NULL. This function assumes that the page is known to have a
+ * Returns a pointer to the object cgroup associated with the folio,
+ * or NULL. This function assumes that the folio is known to have a
* proper object cgroup pointer. It's not safe to call this function
- * against some type of pages, e.g. slab pages or ex-slab pages or
- * LRU pages.
+ * against some type of folios, e.g. slab folios or ex-slab folios or
+ * LRU folios.
*/
-static inline struct obj_cgroup *__page_objcg(struct page *page)
+static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
{
- unsigned long memcg_data = page->memcg_data;
+ unsigned long memcg_data = folio->memcg_data;
- VM_BUG_ON_PAGE(PageSlab(page), page);
- VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_OBJCGS, page);
- VM_BUG_ON_PAGE(!(memcg_data & MEMCG_DATA_KMEM), page);
+ VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
+ VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
+ VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);
return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}
/*
- * page_memcg - get the memory cgroup associated with a page
- * @page: a pointer to the page struct
+ * folio_memcg - Get the memory cgroup associated with a folio.
+ * @folio: Pointer to the folio.
*
- * Returns a pointer to the memory cgroup associated with the page,
- * or NULL. This function assumes that the page is known to have a
+ * Returns a pointer to the memory cgroup associated with the folio,
+ * or NULL. This function assumes that the folio is known to have a
* proper memory cgroup pointer. It's not safe to call this function
- * against some type of pages, e.g. slab pages or ex-slab pages.
+ * against some type of folios, e.g. slab folios or ex-slab folios.
*
- * For a non-kmem page any of the following ensures page and memcg binding
+ * For a non-kmem folio any of the following ensures folio and memcg binding
* stability:
*
- * - the page lock
+ * - the folio lock
* - LRU isolation
* - lock_page_memcg()
* - exclusive reference
*
- * For a kmem page a caller should hold an rcu read lock to protect memcg
- * associated with a kmem page from being released.
+ * For a kmem folio a caller should hold an rcu read lock to protect memcg
+ * associated with a kmem folio from being released.
*/
+static inline struct mem_cgroup *folio_memcg(struct folio *folio)
+{
+ if (folio_memcg_kmem(folio))
+ return obj_cgroup_memcg(__folio_objcg(folio));
+ return __folio_memcg(folio);
+}
+
static inline struct mem_cgroup *page_memcg(struct page *page)
{
- if (PageMemcgKmem(page))
- return obj_cgroup_memcg(__page_objcg(page));
- else
- return __page_memcg(page);
+ return folio_memcg(page_folio(page));
}
-/*
- * page_memcg_rcu - locklessly get the memory cgroup associated with a page
- * @page: a pointer to the page struct
+/**
+ * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
+ * @folio: Pointer to the folio.
*
- * Returns a pointer to the memory cgroup associated with the page,
- * or NULL. This function assumes that the page is known to have a
+ * This function assumes that the folio is known to have a
* proper memory cgroup pointer. It's not safe to call this function
- * against some type of pages, e.g. slab pages or ex-slab pages.
+ * against some type of folios, e.g. slab folios or ex-slab folios.
+ *
+ * Return: A pointer to the memory cgroup associated with the folio,
+ * or NULL.
*/
-static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
+static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
- unsigned long memcg_data = READ_ONCE(page->memcg_data);
+ unsigned long memcg_data = READ_ONCE(folio->memcg_data);
- VM_BUG_ON_PAGE(PageSlab(page), page);
+ VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
WARN_ON_ONCE(!rcu_read_lock_held());
if (memcg_data & MEMCG_DATA_KMEM) {
@@ -523,17 +522,18 @@ static inline struct mem_cgroup *page_memcg_check(struct page *page)
#ifdef CONFIG_MEMCG_KMEM
/*
- * PageMemcgKmem - check if the page has MemcgKmem flag set
- * @page: a pointer to the page struct
+ * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
+ * @folio: Pointer to the folio.
*
- * Checks if the page has MemcgKmem flag set. The caller must ensure that
- * the page has an associated memory cgroup. It's not safe to call this function
- * against some types of pages, e.g. slab pages.
+ * Checks if the folio has MemcgKmem flag set. The caller must ensure
+ * that the folio has an associated memory cgroup. It's not safe to call
+ * this function against some types of folios, e.g. slab folios.
*/
-static inline bool PageMemcgKmem(struct page *page)
+static inline bool folio_memcg_kmem(struct folio *folio)
{
- VM_BUG_ON_PAGE(page->memcg_data & MEMCG_DATA_OBJCGS, page);
- return page->memcg_data & MEMCG_DATA_KMEM;
+ VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
+ VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJCGS, folio);
+ return folio->memcg_data & MEMCG_DATA_KMEM;
}
/*
@@ -577,7 +577,7 @@ static inline struct obj_cgroup **page_objcgs_check(struct page *page)
}
#else
-static inline bool PageMemcgKmem(struct page *page)
+static inline bool folio_memcg_kmem(struct folio *folio)
{
return false;
}
@@ -593,6 +593,11 @@ static inline struct obj_cgroup **page_objcgs_check(struct page *page)
}
#endif
+static inline bool PageMemcgKmem(struct page *page)
+{
+ return folio_memcg_kmem(page_folio(page));
+}
+
static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
return (memcg == root_mem_cgroup);
@@ -684,26 +689,47 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
page_counter_read(&memcg->memory);
}
-int __mem_cgroup_charge(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask);
-static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask)
+int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);
+
+/**
+ * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
+ * @folio: Folio to charge.
+ * @mm: mm context of the allocating task.
+ * @gfp: Reclaim mode.
+ *
+ * Try to charge @folio to the memcg that @mm belongs to, reclaiming
+ * pages according to @gfp if necessary. If @mm is NULL, try to
+ * charge to the active memcg.
+ *
+ * Do not use this for folios allocated for swapin.
+ *
+ * Return: 0 on success. Otherwise, an error code is returned.
+ */
+static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
+ gfp_t gfp)
{
if (mem_cgroup_disabled())
return 0;
- return __mem_cgroup_charge(page, mm, gfp_mask);
+ return __mem_cgroup_charge(folio, mm, gfp);
}
int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
gfp_t gfp, swp_entry_t entry);
void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
-void __mem_cgroup_uncharge(struct page *page);
-static inline void mem_cgroup_uncharge(struct page *page)
+void __mem_cgroup_uncharge(struct folio *folio);
+
+/**
+ * mem_cgroup_uncharge - Uncharge a folio.
+ * @folio: Folio to uncharge.
+ *
+ * Uncharge a folio previously charged with mem_cgroup_charge().
+ */
+static inline void mem_cgroup_uncharge(struct folio *folio)
{
if (mem_cgroup_disabled())
return;
- __mem_cgroup_uncharge(page);
+ __mem_cgroup_uncharge(folio);
}
void __mem_cgroup_uncharge_list(struct list_head *page_list);
@@ -714,7 +740,7 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
__mem_cgroup_uncharge_list(page_list);
}
-void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
+void mem_cgroup_migrate(struct folio *old, struct folio *new);
/**
* mem_cgroup_lruvec - get the lru list vector for a memcg & node
@@ -753,33 +779,33 @@ out:
}
/**
- * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
- * @page: the page
+ * folio_lruvec - return lruvec for isolating/putting an LRU folio
+ * @folio: Pointer to the folio.
*
- * This function relies on page->mem_cgroup being stable.
+ * This function relies on folio->mem_cgroup being stable.
*/
-static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page)
+static inline struct lruvec *folio_lruvec(struct folio *folio)
{
- pg_data_t *pgdat = page_pgdat(page);
- struct mem_cgroup *memcg = page_memcg(page);
+ struct mem_cgroup *memcg = folio_memcg(folio);
- VM_WARN_ON_ONCE_PAGE(!memcg && !mem_cgroup_disabled(), page);
- return mem_cgroup_lruvec(memcg, pgdat);
+ VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
+ return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
}
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
-struct lruvec *lock_page_lruvec(struct page *page);
-struct lruvec *lock_page_lruvec_irq(struct page *page);
-struct lruvec *lock_page_lruvec_irqsave(struct page *page,
+struct lruvec *folio_lruvec_lock(struct folio *folio);
+struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
+struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
unsigned long *flags);
#ifdef CONFIG_DEBUG_VM
-void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page);
+void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
#else
-static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
+static inline
+void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}
#endif
@@ -947,6 +973,8 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
extern bool cgroup_memory_noswap;
#endif
+void folio_memcg_lock(struct folio *folio);
+void folio_memcg_unlock(struct folio *folio);
void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page);
@@ -1115,12 +1143,17 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
#define MEM_CGROUP_ID_SHIFT 0
#define MEM_CGROUP_ID_MAX 0
+static inline struct mem_cgroup *folio_memcg(struct folio *folio)
+{
+ return NULL;
+}
+
static inline struct mem_cgroup *page_memcg(struct page *page)
{
return NULL;
}
-static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
+static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
WARN_ON_ONCE(!rcu_read_lock_held());
return NULL;
@@ -1131,6 +1164,11 @@ static inline struct mem_cgroup *page_memcg_check(struct page *page)
return NULL;
}
+static inline bool folio_memcg_kmem(struct folio *folio)
+{
+ return false;
+}
+
static inline bool PageMemcgKmem(struct page *page)
{
return false;
@@ -1179,8 +1217,8 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
return false;
}
-static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask)
+static inline int mem_cgroup_charge(struct folio *folio,
+ struct mm_struct *mm, gfp_t gfp)
{
return 0;
}
@@ -1195,7 +1233,7 @@ static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
{
}
-static inline void mem_cgroup_uncharge(struct page *page)
+static inline void mem_cgroup_uncharge(struct folio *folio)
{
}
@@ -1203,7 +1241,7 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}
-static inline void mem_cgroup_migrate(struct page *old, struct page *new)
+static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}
@@ -1213,14 +1251,14 @@ static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
return &pgdat->__lruvec;
}
-static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page)
+static inline struct lruvec *folio_lruvec(struct folio *folio)
{
- pg_data_t *pgdat = page_pgdat(page);
-
+ struct pglist_data *pgdat = folio_pgdat(folio);
return &pgdat->__lruvec;
}
-static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
+static inline
+void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}
@@ -1250,26 +1288,26 @@ static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}
-static inline struct lruvec *lock_page_lruvec(struct page *page)
+static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
{
- struct pglist_data *pgdat = page_pgdat(page);
+ struct pglist_data *pgdat = folio_pgdat(folio);
spin_lock(&pgdat->__lruvec.lru_lock);
return &pgdat->__lruvec;
}
-static inline struct lruvec *lock_page_lruvec_irq(struct page *page)
+static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
- struct pglist_data *pgdat = page_pgdat(page);
+ struct pglist_data *pgdat = folio_pgdat(folio);
spin_lock_irq(&pgdat->__lruvec.lru_lock);
return &pgdat->__lruvec;
}
-static inline struct lruvec *lock_page_lruvec_irqsave(struct page *page,
+static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
unsigned long *flagsp)
{
- struct pglist_data *pgdat = page_pgdat(page);
+ struct pglist_data *pgdat = folio_pgdat(folio);
spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
return &pgdat->__lruvec;
@@ -1356,6 +1394,14 @@ static inline void unlock_page_memcg(struct page *page)
{
}
+static inline void folio_memcg_lock(struct folio *folio)
+{
+}
+
+static inline void folio_memcg_unlock(struct folio *folio)
+{
+}
+
static inline void mem_cgroup_handle_over_high(void)
{
}
@@ -1517,38 +1563,39 @@ static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
}
/* Test requires a stable page->memcg binding, see page_memcg() */
-static inline bool page_matches_lruvec(struct page *page, struct lruvec *lruvec)
+static inline bool folio_matches_lruvec(struct folio *folio,
+ struct lruvec *lruvec)
{
- return lruvec_pgdat(lruvec) == page_pgdat(page) &&
- lruvec_memcg(lruvec) == page_memcg(page);
+ return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
+ lruvec_memcg(lruvec) == folio_memcg(folio);
}
/* Don't lock again iff page's lruvec locked */
-static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
+static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
struct lruvec *locked_lruvec)
{
if (locked_lruvec) {
- if (page_matches_lruvec(page, locked_lruvec))
+ if (folio_matches_lruvec(folio, locked_lruvec))
return locked_lruvec;
unlock_page_lruvec_irq(locked_lruvec);
}
- return lock_page_lruvec_irq(page);
+ return folio_lruvec_lock_irq(folio);
}
/* Don't lock again iff page's lruvec locked */
-static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
+static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
struct lruvec *locked_lruvec, unsigned long *flags)
{
if (locked_lruvec) {
- if (page_matches_lruvec(page, locked_lruvec))
+ if (folio_matches_lruvec(folio, locked_lruvec))
return locked_lruvec;
unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
}
- return lock_page_lruvec_irqsave(page, flags);
+ return folio_lruvec_lock_irqsave(folio, flags);
}
#ifdef CONFIG_CGROUP_WRITEBACK
@@ -1558,17 +1605,17 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
unsigned long *pheadroom, unsigned long *pdirty,
unsigned long *pwriteback);
-void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
+void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
struct bdi_writeback *wb);
-static inline void mem_cgroup_track_foreign_dirty(struct page *page,
+static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
struct bdi_writeback *wb)
{
if (mem_cgroup_disabled())
return;
- if (unlikely(&page_memcg(page)->css != wb->memcg_css))
- mem_cgroup_track_foreign_dirty_slowpath(page, wb);
+ if (unlikely(&folio_memcg(folio)->css != wb->memcg_css))
+ mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
}
void mem_cgroup_flush_foreign(struct bdi_writeback *wb);
@@ -1588,7 +1635,7 @@ static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
{
}
-static inline void mem_cgroup_track_foreign_dirty(struct page *page,
+static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
struct bdi_writeback *wb)
{
}
@@ -1613,7 +1660,7 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
return true;
do {
- if (time_before(jiffies, memcg->socket_pressure))
+ if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
return true;
} while ((memcg = parent_mem_cgroup(memcg)));
return false;
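
The bulk of the memcontrol.h changes convert the page-based memcg accessors to folios; the relock helpers keep their batching behaviour, taking the lru_lock only when a folio belongs to a different lruvec than the one already held. A sketch of that pattern over a private folio list; the walk itself is illustrative:

    static void example_folio_walk(struct list_head *folios)
    {
            struct lruvec *lruvec = NULL;
            struct folio *folio;

            list_for_each_entry(folio, folios, lru) {
                    /* locks only when the folio's lruvec changes */
                    lruvec = folio_lruvec_relock_irq(folio, lruvec);
                    /* ... operate on folio under lruvec->lru_lock ... */
            }
            if (lruvec)
                    unlock_page_lruvec_irq(lruvec);
    }
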
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 7efc0a7c14c9..88eb587b5143 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -96,7 +96,6 @@ struct memory_notify {
unsigned long start_pfn;
unsigned long nr_pages;
int status_change_nid_normal;
- int status_change_nid_high;
int status_change_nid;
};
@@ -110,7 +109,7 @@ struct mem_section;
#define SLAB_CALLBACK_PRI 1
#define IPC_CALLBACK_PRI 10
-#ifndef CONFIG_MEMORY_HOTPLUG_SPARSE
+#ifndef CONFIG_MEMORY_HOTPLUG
static inline void memory_dev_init(void)
{
return;
@@ -126,7 +125,14 @@ static inline int memory_notify(unsigned long val, void *v)
{
return 0;
}
-#else
+static inline int hotplug_memory_notifier(notifier_fn_t fn, int pri)
+{
+ return 0;
+}
+/* These aren't inline functions due to a GCC bug. */
+#define register_hotmemory_notifier(nb) ({ (void)(nb); 0; })
+#define unregister_hotmemory_notifier(nb) ({ (void)(nb); })
+#else /* CONFIG_MEMORY_HOTPLUG */
extern int register_memory_notifier(struct notifier_block *nb);
extern void unregister_memory_notifier(struct notifier_block *nb);
int create_memory_block_devices(unsigned long start, unsigned long size,
@@ -140,7 +146,6 @@ typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
extern int walk_memory_blocks(unsigned long start, unsigned long size,
void *arg, walk_memory_blocks_func_t func);
extern int for_each_memory_block(void *arg, walk_memory_blocks_func_t func);
-#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT)
extern int memory_group_register_static(int nid, unsigned long max_pages);
extern int memory_group_register_dynamic(int nid, unsigned long unit_pages);
@@ -149,9 +154,6 @@ struct memory_group *memory_group_find_by_id(int mgid);
typedef int (*walk_memory_groups_func_t)(struct memory_group *, void *);
int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
struct memory_group *excluded, void *arg);
-#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
-
-#ifdef CONFIG_MEMORY_HOTPLUG
#define hotplug_memory_notifier(fn, pri) ({ \
static __meminitdata struct notifier_block fn##_mem_nb =\
{ .notifier_call = fn, .priority = pri };\
@@ -159,12 +161,7 @@ int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
})
#define register_hotmemory_notifier(nb) register_memory_notifier(nb)
#define unregister_hotmemory_notifier(nb) unregister_memory_notifier(nb)
-#else
-#define hotplug_memory_notifier(fn, pri) ({ 0; })
-/* These aren't inline functions due to a GCC bug. */
-#define register_hotmemory_notifier(nb) ({ (void)(nb); 0; })
-#define unregister_hotmemory_notifier(nb) ({ (void)(nb); })
-#endif
+#endif /* CONFIG_MEMORY_HOTPLUG */
/*
* Kernel text modification mutex, used for code patching. Users of this lock
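Annotation: a hedged, hypothetical example of a client of this header (example_mem_callback and example_init are illustrative names). With CONFIG_MEMORY_HOTPLUG=n the same code now resolves to the stubs added above instead of referencing unavailable symbols; note also that callbacks may no longer read the removed status_change_nid_high field.

static int example_mem_callback(struct notifier_block *nb,
				unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;

	if (action == MEM_ONLINE)
		pr_info("onlined %lu pages at PFN %lu\n",
			mn->nr_pages, mn->start_pfn);
	return NOTIFY_OK;
}

static int __init example_init(void)
{
	/* expands to a static notifier_block plus a register call */
	return hotplug_memory_notifier(example_mem_callback, IPC_CALLBACK_PRI);
}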
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index e5a867c950b2..be48e003a518 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -98,9 +98,6 @@ static inline void zone_seqlock_init(struct zone *zone)
{
seqlock_init(&zone->span_seqlock);
}
-extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
-extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
-extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
extern void adjust_present_page_count(struct page *page,
struct memory_group *group,
long nr_pages);
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 4091692bed8c..3c7595e81150 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -8,7 +8,6 @@
#include <linux/sched.h>
#include <linux/mmzone.h>
-#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
@@ -184,8 +183,6 @@ extern bool vma_migratable(struct vm_area_struct *vma);
extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);
-extern bool numa_demotion_enabled;
-
static inline bool mpol_is_preferred_many(struct mempolicy *pol)
{
return (pol->mode == MPOL_PREFERRED_MANY);
@@ -301,8 +298,6 @@ static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
return NULL;
}
-#define numa_demotion_enabled false
-
static inline bool mpol_is_preferred_many(struct mempolicy *pol)
{
return false;
diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h
index fa7a43f02f27..8db52324f416 100644
--- a/include/linux/mfd/da9063/core.h
+++ b/include/linux/mfd/da9063/core.h
@@ -36,6 +36,7 @@ enum da9063_variant_codes {
PMIC_DA9063_BB = 0x5,
PMIC_DA9063_CA = 0x6,
PMIC_DA9063_DA = 0x7,
+ PMIC_DA9063_EA = 0x8,
};
/* Interrupts */
diff --git a/include/linux/mfd/hi6421-spmi-pmic.h b/include/linux/mfd/hi6421-spmi-pmic.h
deleted file mode 100644
index e5b8dbf828b6..000000000000
--- a/include/linux/mfd/hi6421-spmi-pmic.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Header file for device driver Hi6421 PMIC
- *
- * Copyright (c) 2013 Linaro Ltd.
- * Copyright (C) 2011 Hisilicon.
- * Copyright (c) 2020-2021 Huawei Technologies Co., Ltd
- *
- * Guodong Xu <guodong.xu@linaro.org>
- */
-
-#ifndef __HISI_PMIC_H
-#define __HISI_PMIC_H
-
-#include <linux/irqdomain.h>
-#include <linux/regmap.h>
-
-struct hi6421_spmi_pmic {
- struct resource *res;
- struct device *dev;
- void __iomem *regs;
- struct regmap *regmap;
-};
-
-#endif /* __HISI_PMIC_H */
diff --git a/include/linux/mfd/idt8a340_reg.h b/include/linux/mfd/idt8a340_reg.h
index 92d763230bdf..a18c1539a152 100644
--- a/include/linux/mfd/idt8a340_reg.h
+++ b/include/linux/mfd/idt8a340_reg.h
@@ -506,6 +506,10 @@
#define STATE_MODE_SHIFT (0)
#define STATE_MODE_MASK (0x7)
+/* Bit definitions for the DPLL_MANU_REF_CFG register */
+#define MANUAL_REFERENCE_SHIFT (0)
+#define MANUAL_REFERENCE_MASK (0x1f)
+
/* Bit definitions for the GPIO_CFG_GBL register */
#define SUPPLY_MODE_SHIFT (0)
#define SUPPLY_MODE_MASK (0x3)
@@ -654,7 +658,7 @@
/* Values of DPLL_N.DPLL_MODE.PLL_MODE */
enum pll_mode {
PLL_MODE_MIN = 0,
- PLL_MODE_NORMAL = PLL_MODE_MIN,
+ PLL_MODE_PLL = PLL_MODE_MIN,
PLL_MODE_WRITE_PHASE = 1,
PLL_MODE_WRITE_FREQUENCY = 2,
PLL_MODE_GPIO_INC_DEC = 3,
@@ -664,6 +668,31 @@ enum pll_mode {
PLL_MODE_MAX = PLL_MODE_DISABLED,
};
+/* Values of DPLL_CTRL_n.DPLL_MANU_REF_CFG.MANUAL_REFERENCE */
+enum manual_reference {
+ MANU_REF_MIN = 0,
+ MANU_REF_CLK0 = MANU_REF_MIN,
+ MANU_REF_CLK1,
+ MANU_REF_CLK2,
+ MANU_REF_CLK3,
+ MANU_REF_CLK4,
+ MANU_REF_CLK5,
+ MANU_REF_CLK6,
+ MANU_REF_CLK7,
+ MANU_REF_CLK8,
+ MANU_REF_CLK9,
+ MANU_REF_CLK10,
+ MANU_REF_CLK11,
+ MANU_REF_CLK12,
+ MANU_REF_CLK13,
+ MANU_REF_CLK14,
+ MANU_REF_CLK15,
+ MANU_REF_WRITE_PHASE,
+ MANU_REF_WRITE_FREQUENCY,
+ MANU_REF_XO_DPLL,
+ MANU_REF_MAX = MANU_REF_XO_DPLL,
+};
+
enum hw_tod_write_trig_sel {
HW_TOD_WR_TRIG_SEL_MIN = 0,
HW_TOD_WR_TRIG_SEL_MSB = HW_TOD_WR_TRIG_SEL_MIN,
diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h
index 833e578e051e..b1482b3cf353 100644
--- a/include/linux/mfd/max77686-private.h
+++ b/include/linux/mfd/max77686-private.h
@@ -133,35 +133,35 @@ enum max77686_pmic_reg {
/* Reserved: 0x7A-0x7D */
MAX77686_REG_BBAT_CHG = 0x7E,
- MAX77686_REG_32KHZ = 0x7F,
+ MAX77686_REG_32KHZ = 0x7F,
MAX77686_REG_PMIC_END = 0x80,
};
enum max77686_rtc_reg {
- MAX77686_RTC_INT = 0x00,
- MAX77686_RTC_INTM = 0x01,
+ MAX77686_RTC_INT = 0x00,
+ MAX77686_RTC_INTM = 0x01,
MAX77686_RTC_CONTROLM = 0x02,
MAX77686_RTC_CONTROL = 0x03,
MAX77686_RTC_UPDATE0 = 0x04,
/* Reserved: 0x5 */
MAX77686_WTSR_SMPL_CNTL = 0x06,
- MAX77686_RTC_SEC = 0x07,
- MAX77686_RTC_MIN = 0x08,
- MAX77686_RTC_HOUR = 0x09,
+ MAX77686_RTC_SEC = 0x07,
+ MAX77686_RTC_MIN = 0x08,
+ MAX77686_RTC_HOUR = 0x09,
MAX77686_RTC_WEEKDAY = 0x0A,
- MAX77686_RTC_MONTH = 0x0B,
- MAX77686_RTC_YEAR = 0x0C,
- MAX77686_RTC_DATE = 0x0D,
- MAX77686_ALARM1_SEC = 0x0E,
- MAX77686_ALARM1_MIN = 0x0F,
+ MAX77686_RTC_MONTH = 0x0B,
+ MAX77686_RTC_YEAR = 0x0C,
+ MAX77686_RTC_DATE = 0x0D,
+ MAX77686_ALARM1_SEC = 0x0E,
+ MAX77686_ALARM1_MIN = 0x0F,
MAX77686_ALARM1_HOUR = 0x10,
MAX77686_ALARM1_WEEKDAY = 0x11,
MAX77686_ALARM1_MONTH = 0x12,
MAX77686_ALARM1_YEAR = 0x13,
MAX77686_ALARM1_DATE = 0x14,
- MAX77686_ALARM2_SEC = 0x15,
- MAX77686_ALARM2_MIN = 0x16,
+ MAX77686_ALARM2_SEC = 0x15,
+ MAX77686_ALARM2_MIN = 0x16,
MAX77686_ALARM2_HOUR = 0x17,
MAX77686_ALARM2_WEEKDAY = 0x18,
MAX77686_ALARM2_MONTH = 0x19,
diff --git a/include/linux/mfd/stm32-lptimer.h b/include/linux/mfd/stm32-lptimer.h
index 90b20550c1c8..06d3f11dc3c9 100644
--- a/include/linux/mfd/stm32-lptimer.h
+++ b/include/linux/mfd/stm32-lptimer.h
@@ -45,6 +45,11 @@
#define STM32_LPTIM_PRESC GENMASK(11, 9)
#define STM32_LPTIM_CKPOL GENMASK(2, 1)
+/* STM32_LPTIM_CKPOL */
+#define STM32_LPTIM_CKPOL_RISING_EDGE 0
+#define STM32_LPTIM_CKPOL_FALLING_EDGE 1
+#define STM32_LPTIM_CKPOL_BOTH_EDGES 2
+
/* STM32_LPTIM_ARR */
#define STM32_LPTIM_MAX_ARR 0xFFFF
diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h
index f8db83aedb2b..5f5c43fd69dd 100644
--- a/include/linux/mfd/stm32-timers.h
+++ b/include/linux/mfd/stm32-timers.h
@@ -82,6 +82,10 @@
#define MAX_TIM_ICPSC 0x3
#define TIM_CR2_MMS_SHIFT 4
#define TIM_CR2_MMS2_SHIFT 20
+#define TIM_SMCR_SMS_SLAVE_MODE_DISABLED 0 /* counts on internal clock when CEN=1 */
+#define TIM_SMCR_SMS_ENCODER_MODE_1 1 /* counts TI1FP1 edges, depending on TI2FP2 level */
+#define TIM_SMCR_SMS_ENCODER_MODE_2 2 /* counts TI2FP2 edges, depending on TI1FP1 level */
+#define TIM_SMCR_SMS_ENCODER_MODE_3 3 /* counts on both TI1FP1 and TI2FP2 edges */
#define TIM_SMCR_TS_SHIFT 4
#define TIM_BDTR_BKF_MASK 0xF
#define TIM_BDTR_BKF_SHIFT(x) (16 + (x) * 4)
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index ffc091b77633..ba13e043d910 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -1,22 +1,16 @@
-#ifndef __LINUX_TI_AM335X_TSCADC_MFD_H
-#define __LINUX_TI_AM335X_TSCADC_MFD_H
-
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI Touch Screen / ADC MFD driver
*
* Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
+#ifndef __LINUX_TI_AM335X_TSCADC_MFD_H
+#define __LINUX_TI_AM335X_TSCADC_MFD_H
+
+#include <linux/bitfield.h>
#include <linux/mfd/core.h>
+#include <linux/units.h>
#define REG_RAWIRQSTATUS 0x024
#define REG_IRQSTATUS 0x028
@@ -46,13 +40,6 @@
/* IRQ wakeup enable */
#define IRQWKUP_ENB BIT(0)
-/* Step Enable */
-#define STEPENB_MASK (0x1FFFF << 0)
-#define STEPENB(val) ((val) << 0)
-#define ENB(val) (1 << (val))
-#define STPENB_STEPENB STEPENB(0x1FFFF)
-#define STPENB_STEPENB_TC STEPENB(0x1FFF)
-
/* IRQ enable */
#define IRQENB_HW_PEN BIT(0)
#define IRQENB_EOS BIT(1)
@@ -65,12 +52,10 @@
#define IRQENB_PENUP BIT(9)
/* Step Configuration */
-#define STEPCONFIG_MODE_MASK (3 << 0)
-#define STEPCONFIG_MODE(val) ((val) << 0)
+#define STEPCONFIG_MODE(val) FIELD_PREP(GENMASK(1, 0), (val))
#define STEPCONFIG_MODE_SWCNT STEPCONFIG_MODE(1)
#define STEPCONFIG_MODE_HWSYNC STEPCONFIG_MODE(2)
-#define STEPCONFIG_AVG_MASK (7 << 2)
-#define STEPCONFIG_AVG(val) ((val) << 2)
+#define STEPCONFIG_AVG(val) FIELD_PREP(GENMASK(4, 2), (val))
#define STEPCONFIG_AVG_16 STEPCONFIG_AVG(4)
#define STEPCONFIG_XPP BIT(5)
#define STEPCONFIG_XNN BIT(6)
@@ -78,70 +63,68 @@
#define STEPCONFIG_YNN BIT(8)
#define STEPCONFIG_XNP BIT(9)
#define STEPCONFIG_YPN BIT(10)
-#define STEPCONFIG_RFP(val) ((val) << 12)
-#define STEPCONFIG_RFP_VREFP (0x3 << 12)
-#define STEPCONFIG_INM_MASK (0xF << 15)
-#define STEPCONFIG_INM(val) ((val) << 15)
+#define STEPCONFIG_RFP(val) FIELD_PREP(GENMASK(13, 12), (val))
+#define STEPCONFIG_RFP_VREFP STEPCONFIG_RFP(3)
+#define STEPCONFIG_INM(val) FIELD_PREP(GENMASK(18, 15), (val))
#define STEPCONFIG_INM_ADCREFM STEPCONFIG_INM(8)
-#define STEPCONFIG_INP_MASK (0xF << 19)
-#define STEPCONFIG_INP(val) ((val) << 19)
+#define STEPCONFIG_INP(val) FIELD_PREP(GENMASK(22, 19), (val))
#define STEPCONFIG_INP_AN4 STEPCONFIG_INP(4)
#define STEPCONFIG_INP_ADCREFM STEPCONFIG_INP(8)
#define STEPCONFIG_FIFO1 BIT(26)
-#define STEPCONFIG_RFM(val) ((val) << 23)
-#define STEPCONFIG_RFM_VREFN (0x3 << 23)
+#define STEPCONFIG_RFM(val) FIELD_PREP(GENMASK(24, 23), (val))
+#define STEPCONFIG_RFM_VREFN STEPCONFIG_RFM(3)
/* Delay register */
-#define STEPDELAY_OPEN_MASK (0x3FFFF << 0)
-#define STEPDELAY_OPEN(val) ((val) << 0)
+#define STEPDELAY_OPEN(val) FIELD_PREP(GENMASK(17, 0), (val))
#define STEPCONFIG_OPENDLY STEPDELAY_OPEN(0x098)
-#define STEPDELAY_SAMPLE_MASK (0xFF << 24)
-#define STEPDELAY_SAMPLE(val) ((val) << 24)
+#define STEPCONFIG_MAX_OPENDLY GENMASK(17, 0)
+#define STEPDELAY_SAMPLE(val) FIELD_PREP(GENMASK(31, 24), (val))
#define STEPCONFIG_SAMPLEDLY STEPDELAY_SAMPLE(0)
+#define STEPCONFIG_MAX_SAMPLE GENMASK(7, 0)
/* Charge Config */
-#define STEPCHARGE_RFP_MASK (7 << 12)
-#define STEPCHARGE_RFP(val) ((val) << 12)
+#define STEPCHARGE_RFP(val) FIELD_PREP(GENMASK(14, 12), (val))
#define STEPCHARGE_RFP_XPUL STEPCHARGE_RFP(1)
-#define STEPCHARGE_INM_MASK (0xF << 15)
-#define STEPCHARGE_INM(val) ((val) << 15)
+#define STEPCHARGE_INM(val) FIELD_PREP(GENMASK(18, 15), (val))
#define STEPCHARGE_INM_AN1 STEPCHARGE_INM(1)
-#define STEPCHARGE_INP_MASK (0xF << 19)
-#define STEPCHARGE_INP(val) ((val) << 19)
-#define STEPCHARGE_RFM_MASK (3 << 23)
-#define STEPCHARGE_RFM(val) ((val) << 23)
+#define STEPCHARGE_INP(val) FIELD_PREP(GENMASK(22, 19), (val))
+#define STEPCHARGE_RFM(val) FIELD_PREP(GENMASK(24, 23), (val))
#define STEPCHARGE_RFM_XNUR STEPCHARGE_RFM(1)
/* Charge delay */
-#define CHARGEDLY_OPEN_MASK (0x3FFFF << 0)
-#define CHARGEDLY_OPEN(val) ((val) << 0)
+#define CHARGEDLY_OPEN(val) FIELD_PREP(GENMASK(17, 0), (val))
#define CHARGEDLY_OPENDLY CHARGEDLY_OPEN(0x400)
/* Control register */
-#define CNTRLREG_TSCSSENB BIT(0)
+#define CNTRLREG_SSENB BIT(0)
#define CNTRLREG_STEPID BIT(1)
-#define CNTRLREG_STEPCONFIGWRT BIT(2)
+#define CNTRLREG_TSC_STEPCONFIGWRT BIT(2)
#define CNTRLREG_POWERDOWN BIT(4)
-#define CNTRLREG_AFE_CTRL_MASK (3 << 5)
-#define CNTRLREG_AFE_CTRL(val) ((val) << 5)
-#define CNTRLREG_4WIRE CNTRLREG_AFE_CTRL(1)
-#define CNTRLREG_5WIRE CNTRLREG_AFE_CTRL(2)
-#define CNTRLREG_8WIRE CNTRLREG_AFE_CTRL(3)
-#define CNTRLREG_TSCENB BIT(7)
+#define CNTRLREG_TSC_AFE_CTRL(val) FIELD_PREP(GENMASK(6, 5), (val))
+#define CNTRLREG_TSC_4WIRE CNTRLREG_TSC_AFE_CTRL(1)
+#define CNTRLREG_TSC_5WIRE CNTRLREG_TSC_AFE_CTRL(2)
+#define CNTRLREG_TSC_8WIRE CNTRLREG_TSC_AFE_CTRL(3)
+#define CNTRLREG_TSC_ENB BIT(7)
+
+/* Control register bitfields for the MAGADC IP */
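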
+#define CNTRLREG_MAGADCENB BIT(0)
+#define CNTRLREG_MAG_PREAMP_PWRDOWN BIT(5)
+#define CNTRLREG_MAG_PREAMP_BYPASS BIT(6)
/* FIFO READ Register */
-#define FIFOREAD_DATA_MASK (0xfff << 0)
-#define FIFOREAD_CHNLID_MASK (0xf << 16)
+#define FIFOREAD_DATA_MASK GENMASK(11, 0)
+#define FIFOREAD_CHNLID_MASK GENMASK(19, 16)
/* DMA ENABLE/CLEAR Register */
#define DMA_FIFO0 BIT(0)
#define DMA_FIFO1 BIT(1)
/* Sequencer Status */
-#define SEQ_STATUS BIT(5)
+#define SEQ_STATUS BIT(5)
#define CHARGE_STEP 0x11
-#define ADC_CLK 3000000
+#define TSC_ADC_CLK (3 * HZ_PER_MHZ)
+#define MAG_ADC_CLK (13 * HZ_PER_MHZ)
#define TOTAL_STEPS 16
#define TOTAL_CHANNELS 8
#define FIFO1_THRESHOLD 19
@@ -158,21 +141,27 @@
*
 * max processing time: 266431 * 308 ns = 83 ms (approx.)
*/
-#define IDLE_TIMEOUT 83 /* milliseconds */
+#define IDLE_TIMEOUT_MS 83 /* milliseconds */
#define TSCADC_CELLS 2
+struct ti_tscadc_data {
+ char *adc_feature_name;
+ char *adc_feature_compatible;
+ char *secondary_feature_name;
+ char *secondary_feature_compatible;
+ unsigned int target_clk_rate;
+};
+
struct ti_tscadc_dev {
struct device *dev;
struct regmap *regmap;
void __iomem *tscadc_base;
phys_addr_t tscadc_phys_base;
+ const struct ti_tscadc_data *data;
int irq;
- int used_cells; /* 1-2 */
- int tsc_wires;
- int tsc_cell; /* -1 if not used */
- int adc_cell; /* -1 if not used */
struct mfd_cell cells[TSCADC_CELLS];
+ u32 ctrl;
u32 reg_se_cache;
bool adc_waiting;
bool adc_in_use;
@@ -194,6 +183,12 @@ static inline struct ti_tscadc_dev *ti_tscadc_dev_get(struct platform_device *p)
return *tscadc_dev;
}
+static inline bool ti_adc_with_touchscreen(struct ti_tscadc_dev *tscadc)
+{
+ return of_device_is_compatible(tscadc->dev->of_node,
+ "ti,am3359-tscadc");
+}
+
void am335x_tsc_se_set_cache(struct ti_tscadc_dev *tsadc, u32 val);
void am335x_tsc_se_set_once(struct ti_tscadc_dev *tsadc, u32 val);
void am335x_tsc_se_clr(struct ti_tscadc_dev *tsadc, u32 val);
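Annotation: every mask/shift macro pair above collapses into a FIELD_PREP(GENMASK(...)) call. A small illustrative comparison, using the AVG field converted in this diff (macro names are examples only):

#include <linux/bitfield.h>

/* old style: the shift is separate from the mask, so nothing checks width */
#define EXAMPLE_AVG_OLD(val)	((val) << 2)
/*
 * new style: GENMASK(4, 2) encodes position and width together, and
 * FIELD_PREP fails the build if a constant value overflows the field
 * instead of silently corrupting neighbouring bits
 */
#define EXAMPLE_AVG_NEW(val)	FIELD_PREP(GENMASK(4, 2), (val))

static inline u32 example_decode_avg(u32 stepconfig)
{
	return FIELD_GET(GENMASK(4, 2), stepconfig);	/* decode back */
}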
diff --git a/include/linux/mfd/tps65912.h b/include/linux/mfd/tps65912.h
index 7943e413deae..8a61386cb8c1 100644
--- a/include/linux/mfd/tps65912.h
+++ b/include/linux/mfd/tps65912.h
@@ -322,6 +322,6 @@ struct tps65912 {
extern const struct regmap_config tps65912_regmap_config;
int tps65912_device_init(struct tps65912 *tps);
-int tps65912_device_exit(struct tps65912 *tps);
+void tps65912_device_exit(struct tps65912 *tps);
#endif /* __LINUX_MFD_TPS65912_H */
diff --git a/include/linux/mfd/tps80031.h b/include/linux/mfd/tps80031.h
deleted file mode 100644
index 2c75c9c9318f..000000000000
--- a/include/linux/mfd/tps80031.h
+++ /dev/null
@@ -1,637 +0,0 @@
-/*
- * tps80031.h -- TI TPS80031 and TI TPS80032 PMIC driver.
- *
- * Copyright (c) 2012, NVIDIA Corporation.
- *
- * Author: Laxman Dewangan <ldewangan@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
- * whether express or implied; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307, USA
- */
-
-#ifndef __LINUX_MFD_TPS80031_H
-#define __LINUX_MFD_TPS80031_H
-
-#include <linux/device.h>
-#include <linux/regmap.h>
-
-/* Pull-ups/Pull-downs */
-#define TPS80031_CFG_INPUT_PUPD1 0xF0
-#define TPS80031_CFG_INPUT_PUPD2 0xF1
-#define TPS80031_CFG_INPUT_PUPD3 0xF2
-#define TPS80031_CFG_INPUT_PUPD4 0xF3
-#define TPS80031_CFG_LDO_PD1 0xF4
-#define TPS80031_CFG_LDO_PD2 0xF5
-#define TPS80031_CFG_SMPS_PD 0xF6
-
-/* Real Time Clock */
-#define TPS80031_SECONDS_REG 0x00
-#define TPS80031_MINUTES_REG 0x01
-#define TPS80031_HOURS_REG 0x02
-#define TPS80031_DAYS_REG 0x03
-#define TPS80031_MONTHS_REG 0x04
-#define TPS80031_YEARS_REG 0x05
-#define TPS80031_WEEKS_REG 0x06
-#define TPS80031_ALARM_SECONDS_REG 0x08
-#define TPS80031_ALARM_MINUTES_REG 0x09
-#define TPS80031_ALARM_HOURS_REG 0x0A
-#define TPS80031_ALARM_DAYS_REG 0x0B
-#define TPS80031_ALARM_MONTHS_REG 0x0C
-#define TPS80031_ALARM_YEARS_REG 0x0D
-#define TPS80031_RTC_CTRL_REG 0x10
-#define TPS80031_RTC_STATUS_REG 0x11
-#define TPS80031_RTC_INTERRUPTS_REG 0x12
-#define TPS80031_RTC_COMP_LSB_REG 0x13
-#define TPS80031_RTC_COMP_MSB_REG 0x14
-#define TPS80031_RTC_RESET_STATUS_REG 0x16
-
-/*PMC Master Module */
-#define TPS80031_PHOENIX_START_CONDITION 0x1F
-#define TPS80031_PHOENIX_MSK_TRANSITION 0x20
-#define TPS80031_STS_HW_CONDITIONS 0x21
-#define TPS80031_PHOENIX_LAST_TURNOFF_STS 0x22
-#define TPS80031_VSYSMIN_LO_THRESHOLD 0x23
-#define TPS80031_VSYSMIN_HI_THRESHOLD 0x24
-#define TPS80031_PHOENIX_DEV_ON 0x25
-#define TPS80031_STS_PWR_GRP_STATE 0x27
-#define TPS80031_PH_CFG_VSYSLOW 0x28
-#define TPS80031_PH_STS_BOOT 0x29
-#define TPS80031_PHOENIX_SENS_TRANSITION 0x2A
-#define TPS80031_PHOENIX_SEQ_CFG 0x2B
-#define TPS80031_PRIMARY_WATCHDOG_CFG 0X2C
-#define TPS80031_KEY_PRESS_DUR_CFG 0X2D
-#define TPS80031_SMPS_LDO_SHORT_STS 0x2E
-
-/* PMC Slave Module - Broadcast */
-#define TPS80031_BROADCAST_ADDR_ALL 0x31
-#define TPS80031_BROADCAST_ADDR_REF 0x32
-#define TPS80031_BROADCAST_ADDR_PROV 0x33
-#define TPS80031_BROADCAST_ADDR_CLK_RST 0x34
-
-/* PMC Slave Module SMPS Regulators */
-#define TPS80031_SMPS4_CFG_TRANS 0x41
-#define TPS80031_SMPS4_CFG_STATE 0x42
-#define TPS80031_SMPS4_CFG_VOLTAGE 0x44
-#define TPS80031_VIO_CFG_TRANS 0x47
-#define TPS80031_VIO_CFG_STATE 0x48
-#define TPS80031_VIO_CFG_FORCE 0x49
-#define TPS80031_VIO_CFG_VOLTAGE 0x4A
-#define TPS80031_VIO_CFG_STEP 0x48
-#define TPS80031_SMPS1_CFG_TRANS 0x53
-#define TPS80031_SMPS1_CFG_STATE 0x54
-#define TPS80031_SMPS1_CFG_FORCE 0x55
-#define TPS80031_SMPS1_CFG_VOLTAGE 0x56
-#define TPS80031_SMPS1_CFG_STEP 0x57
-#define TPS80031_SMPS2_CFG_TRANS 0x59
-#define TPS80031_SMPS2_CFG_STATE 0x5A
-#define TPS80031_SMPS2_CFG_FORCE 0x5B
-#define TPS80031_SMPS2_CFG_VOLTAGE 0x5C
-#define TPS80031_SMPS2_CFG_STEP 0x5D
-#define TPS80031_SMPS3_CFG_TRANS 0x65
-#define TPS80031_SMPS3_CFG_STATE 0x66
-#define TPS80031_SMPS3_CFG_VOLTAGE 0x68
-
-/* PMC Slave Module LDO Regulators */
-#define TPS80031_VANA_CFG_TRANS 0x81
-#define TPS80031_VANA_CFG_STATE 0x82
-#define TPS80031_VANA_CFG_VOLTAGE 0x83
-#define TPS80031_LDO2_CFG_TRANS 0x85
-#define TPS80031_LDO2_CFG_STATE 0x86
-#define TPS80031_LDO2_CFG_VOLTAGE 0x87
-#define TPS80031_LDO4_CFG_TRANS 0x89
-#define TPS80031_LDO4_CFG_STATE 0x8A
-#define TPS80031_LDO4_CFG_VOLTAGE 0x8B
-#define TPS80031_LDO3_CFG_TRANS 0x8D
-#define TPS80031_LDO3_CFG_STATE 0x8E
-#define TPS80031_LDO3_CFG_VOLTAGE 0x8F
-#define TPS80031_LDO6_CFG_TRANS 0x91
-#define TPS80031_LDO6_CFG_STATE 0x92
-#define TPS80031_LDO6_CFG_VOLTAGE 0x93
-#define TPS80031_LDOLN_CFG_TRANS 0x95
-#define TPS80031_LDOLN_CFG_STATE 0x96
-#define TPS80031_LDOLN_CFG_VOLTAGE 0x97
-#define TPS80031_LDO5_CFG_TRANS 0x99
-#define TPS80031_LDO5_CFG_STATE 0x9A
-#define TPS80031_LDO5_CFG_VOLTAGE 0x9B
-#define TPS80031_LDO1_CFG_TRANS 0x9D
-#define TPS80031_LDO1_CFG_STATE 0x9E
-#define TPS80031_LDO1_CFG_VOLTAGE 0x9F
-#define TPS80031_LDOUSB_CFG_TRANS 0xA1
-#define TPS80031_LDOUSB_CFG_STATE 0xA2
-#define TPS80031_LDOUSB_CFG_VOLTAGE 0xA3
-#define TPS80031_LDO7_CFG_TRANS 0xA5
-#define TPS80031_LDO7_CFG_STATE 0xA6
-#define TPS80031_LDO7_CFG_VOLTAGE 0xA7
-
-/* PMC Slave Module External Control */
-#define TPS80031_REGEN1_CFG_TRANS 0xAE
-#define TPS80031_REGEN1_CFG_STATE 0xAF
-#define TPS80031_REGEN2_CFG_TRANS 0xB1
-#define TPS80031_REGEN2_CFG_STATE 0xB2
-#define TPS80031_SYSEN_CFG_TRANS 0xB4
-#define TPS80031_SYSEN_CFG_STATE 0xB5
-
-/* PMC Slave Module Internal Control */
-#define TPS80031_NRESPWRON_CFG_TRANS 0xB7
-#define TPS80031_NRESPWRON_CFG_STATE 0xB8
-#define TPS80031_CLK32KAO_CFG_TRANS 0xBA
-#define TPS80031_CLK32KAO_CFG_STATE 0xBB
-#define TPS80031_CLK32KG_CFG_TRANS 0xBD
-#define TPS80031_CLK32KG_CFG_STATE 0xBE
-#define TPS80031_CLK32KAUDIO_CFG_TRANS 0xC0
-#define TPS80031_CLK32KAUDIO_CFG_STATE 0xC1
-#define TPS80031_VRTC_CFG_TRANS 0xC3
-#define TPS80031_VRTC_CFG_STATE 0xC4
-#define TPS80031_BIAS_CFG_TRANS 0xC6
-#define TPS80031_BIAS_CFG_STATE 0xC7
-#define TPS80031_VSYSMIN_HI_CFG_TRANS 0xC9
-#define TPS80031_VSYSMIN_HI_CFG_STATE 0xCA
-#define TPS80031_RC6MHZ_CFG_TRANS 0xCC
-#define TPS80031_RC6MHZ_CFG_STATE 0xCD
-#define TPS80031_TMP_CFG_TRANS 0xCF
-#define TPS80031_TMP_CFG_STATE 0xD0
-
-/* PMC Slave Module resources assignment */
-#define TPS80031_PREQ1_RES_ASS_A 0xD7
-#define TPS80031_PREQ1_RES_ASS_B 0xD8
-#define TPS80031_PREQ1_RES_ASS_C 0xD9
-#define TPS80031_PREQ2_RES_ASS_A 0xDA
-#define TPS80031_PREQ2_RES_ASS_B 0xDB
-#define TPS80031_PREQ2_RES_ASS_C 0xDC
-#define TPS80031_PREQ3_RES_ASS_A 0xDD
-#define TPS80031_PREQ3_RES_ASS_B 0xDE
-#define TPS80031_PREQ3_RES_ASS_C 0xDF
-
-/* PMC Slave Module Miscellaneous */
-#define TPS80031_SMPS_OFFSET 0xE0
-#define TPS80031_SMPS_MULT 0xE3
-#define TPS80031_MISC1 0xE4
-#define TPS80031_MISC2 0xE5
-#define TPS80031_BBSPOR_CFG 0xE6
-#define TPS80031_TMP_CFG 0xE7
-
-/* Battery Charging Controller and Indicator LED */
-#define TPS80031_CONTROLLER_CTRL2 0xDA
-#define TPS80031_CONTROLLER_VSEL_COMP 0xDB
-#define TPS80031_CHARGERUSB_VSYSREG 0xDC
-#define TPS80031_CHARGERUSB_VICHRG_PC 0xDD
-#define TPS80031_LINEAR_CHRG_STS 0xDE
-#define TPS80031_CONTROLLER_INT_MASK 0xE0
-#define TPS80031_CONTROLLER_CTRL1 0xE1
-#define TPS80031_CONTROLLER_WDG 0xE2
-#define TPS80031_CONTROLLER_STAT1 0xE3
-#define TPS80031_CHARGERUSB_INT_STATUS 0xE4
-#define TPS80031_CHARGERUSB_INT_MASK 0xE5
-#define TPS80031_CHARGERUSB_STATUS_INT1 0xE6
-#define TPS80031_CHARGERUSB_STATUS_INT2 0xE7
-#define TPS80031_CHARGERUSB_CTRL1 0xE8
-#define TPS80031_CHARGERUSB_CTRL2 0xE9
-#define TPS80031_CHARGERUSB_CTRL3 0xEA
-#define TPS80031_CHARGERUSB_STAT1 0xEB
-#define TPS80031_CHARGERUSB_VOREG 0xEC
-#define TPS80031_CHARGERUSB_VICHRG 0xED
-#define TPS80031_CHARGERUSB_CINLIMIT 0xEE
-#define TPS80031_CHARGERUSB_CTRLLIMIT1 0xEF
-#define TPS80031_CHARGERUSB_CTRLLIMIT2 0xF0
-#define TPS80031_LED_PWM_CTRL1 0xF4
-#define TPS80031_LED_PWM_CTRL2 0xF5
-
-/* USB On-The-Go */
-#define TPS80031_BACKUP_REG 0xFA
-#define TPS80031_USB_VENDOR_ID_LSB 0x00
-#define TPS80031_USB_VENDOR_ID_MSB 0x01
-#define TPS80031_USB_PRODUCT_ID_LSB 0x02
-#define TPS80031_USB_PRODUCT_ID_MSB 0x03
-#define TPS80031_USB_VBUS_CTRL_SET 0x04
-#define TPS80031_USB_VBUS_CTRL_CLR 0x05
-#define TPS80031_USB_ID_CTRL_SET 0x06
-#define TPS80031_USB_ID_CTRL_CLR 0x07
-#define TPS80031_USB_VBUS_INT_SRC 0x08
-#define TPS80031_USB_VBUS_INT_LATCH_SET 0x09
-#define TPS80031_USB_VBUS_INT_LATCH_CLR 0x0A
-#define TPS80031_USB_VBUS_INT_EN_LO_SET 0x0B
-#define TPS80031_USB_VBUS_INT_EN_LO_CLR 0x0C
-#define TPS80031_USB_VBUS_INT_EN_HI_SET 0x0D
-#define TPS80031_USB_VBUS_INT_EN_HI_CLR 0x0E
-#define TPS80031_USB_ID_INT_SRC 0x0F
-#define TPS80031_USB_ID_INT_LATCH_SET 0x10
-#define TPS80031_USB_ID_INT_LATCH_CLR 0x11
-#define TPS80031_USB_ID_INT_EN_LO_SET 0x12
-#define TPS80031_USB_ID_INT_EN_LO_CLR 0x13
-#define TPS80031_USB_ID_INT_EN_HI_SET 0x14
-#define TPS80031_USB_ID_INT_EN_HI_CLR 0x15
-#define TPS80031_USB_OTG_ADP_CTRL 0x16
-#define TPS80031_USB_OTG_ADP_HIGH 0x17
-#define TPS80031_USB_OTG_ADP_LOW 0x18
-#define TPS80031_USB_OTG_ADP_RISE 0x19
-#define TPS80031_USB_OTG_REVISION 0x1A
-
-/* Gas Gauge */
-#define TPS80031_FG_REG_00 0xC0
-#define TPS80031_FG_REG_01 0xC1
-#define TPS80031_FG_REG_02 0xC2
-#define TPS80031_FG_REG_03 0xC3
-#define TPS80031_FG_REG_04 0xC4
-#define TPS80031_FG_REG_05 0xC5
-#define TPS80031_FG_REG_06 0xC6
-#define TPS80031_FG_REG_07 0xC7
-#define TPS80031_FG_REG_08 0xC8
-#define TPS80031_FG_REG_09 0xC9
-#define TPS80031_FG_REG_10 0xCA
-#define TPS80031_FG_REG_11 0xCB
-
-/* General Purpose ADC */
-#define TPS80031_GPADC_CTRL 0x2E
-#define TPS80031_GPADC_CTRL2 0x2F
-#define TPS80031_RTSELECT_LSB 0x32
-#define TPS80031_RTSELECT_ISB 0x33
-#define TPS80031_RTSELECT_MSB 0x34
-#define TPS80031_GPSELECT_ISB 0x35
-#define TPS80031_CTRL_P1 0x36
-#define TPS80031_RTCH0_LSB 0x37
-#define TPS80031_RTCH0_MSB 0x38
-#define TPS80031_RTCH1_LSB 0x39
-#define TPS80031_RTCH1_MSB 0x3A
-#define TPS80031_GPCH0_LSB 0x3B
-#define TPS80031_GPCH0_MSB 0x3C
-
-/* SIM, MMC and Battery Detection */
-#define TPS80031_SIMDEBOUNCING 0xEB
-#define TPS80031_SIMCTRL 0xEC
-#define TPS80031_MMCDEBOUNCING 0xED
-#define TPS80031_MMCCTRL 0xEE
-#define TPS80031_BATDEBOUNCING 0xEF
-
-/* Vibrator Driver and PWMs */
-#define TPS80031_VIBCTRL 0x9B
-#define TPS80031_VIBMODE 0x9C
-#define TPS80031_PWM1ON 0xBA
-#define TPS80031_PWM1OFF 0xBB
-#define TPS80031_PWM2ON 0xBD
-#define TPS80031_PWM2OFF 0xBE
-
-/* Control Interface */
-#define TPS80031_INT_STS_A 0xD0
-#define TPS80031_INT_STS_B 0xD1
-#define TPS80031_INT_STS_C 0xD2
-#define TPS80031_INT_MSK_LINE_A 0xD3
-#define TPS80031_INT_MSK_LINE_B 0xD4
-#define TPS80031_INT_MSK_LINE_C 0xD5
-#define TPS80031_INT_MSK_STS_A 0xD6
-#define TPS80031_INT_MSK_STS_B 0xD7
-#define TPS80031_INT_MSK_STS_C 0xD8
-#define TPS80031_TOGGLE1 0x90
-#define TPS80031_TOGGLE2 0x91
-#define TPS80031_TOGGLE3 0x92
-#define TPS80031_PWDNSTATUS1 0x93
-#define TPS80031_PWDNSTATUS2 0x94
-#define TPS80031_VALIDITY0 0x17
-#define TPS80031_VALIDITY1 0x18
-#define TPS80031_VALIDITY2 0x19
-#define TPS80031_VALIDITY3 0x1A
-#define TPS80031_VALIDITY4 0x1B
-#define TPS80031_VALIDITY5 0x1C
-#define TPS80031_VALIDITY6 0x1D
-#define TPS80031_VALIDITY7 0x1E
-
-/* Version number related register */
-#define TPS80031_JTAGVERNUM 0x87
-#define TPS80031_EPROM_REV 0xDF
-
-/* GPADC Trimming Bits. */
-#define TPS80031_GPADC_TRIM0 0xCC
-#define TPS80031_GPADC_TRIM1 0xCD
-#define TPS80031_GPADC_TRIM2 0xCE
-#define TPS80031_GPADC_TRIM3 0xCF
-#define TPS80031_GPADC_TRIM4 0xD0
-#define TPS80031_GPADC_TRIM5 0xD1
-#define TPS80031_GPADC_TRIM6 0xD2
-#define TPS80031_GPADC_TRIM7 0xD3
-#define TPS80031_GPADC_TRIM8 0xD4
-#define TPS80031_GPADC_TRIM9 0xD5
-#define TPS80031_GPADC_TRIM10 0xD6
-#define TPS80031_GPADC_TRIM11 0xD7
-#define TPS80031_GPADC_TRIM12 0xD8
-#define TPS80031_GPADC_TRIM13 0xD9
-#define TPS80031_GPADC_TRIM14 0xDA
-#define TPS80031_GPADC_TRIM15 0xDB
-#define TPS80031_GPADC_TRIM16 0xDC
-#define TPS80031_GPADC_TRIM17 0xDD
-#define TPS80031_GPADC_TRIM18 0xDE
-
-/* TPS80031_CONTROLLER_STAT1 bit fields */
-#define TPS80031_CONTROLLER_STAT1_BAT_TEMP 0
-#define TPS80031_CONTROLLER_STAT1_BAT_REMOVED 1
-#define TPS80031_CONTROLLER_STAT1_VBUS_DET 2
-#define TPS80031_CONTROLLER_STAT1_VAC_DET 3
-#define TPS80031_CONTROLLER_STAT1_FAULT_WDG 4
-#define TPS80031_CONTROLLER_STAT1_LINCH_GATED 6
-/* TPS80031_CONTROLLER_INT_MASK bit filed */
-#define TPS80031_CONTROLLER_INT_MASK_MVAC_DET 0
-#define TPS80031_CONTROLLER_INT_MASK_MVBUS_DET 1
-#define TPS80031_CONTROLLER_INT_MASK_MBAT_TEMP 2
-#define TPS80031_CONTROLLER_INT_MASK_MFAULT_WDG 3
-#define TPS80031_CONTROLLER_INT_MASK_MBAT_REMOVED 4
-#define TPS80031_CONTROLLER_INT_MASK_MLINCH_GATED 5
-
-#define TPS80031_CHARGE_CONTROL_SUB_INT_MASK 0x3F
-
-/* TPS80031_PHOENIX_DEV_ON bit field */
-#define TPS80031_DEVOFF 0x1
-
-#define TPS80031_EXT_CONTROL_CFG_TRANS 0
-#define TPS80031_EXT_CONTROL_CFG_STATE 1
-
-/* State register field */
-#define TPS80031_STATE_OFF 0x00
-#define TPS80031_STATE_ON 0x01
-#define TPS80031_STATE_MASK 0x03
-
-/* Trans register field */
-#define TPS80031_TRANS_ACTIVE_OFF 0x00
-#define TPS80031_TRANS_ACTIVE_ON 0x01
-#define TPS80031_TRANS_ACTIVE_MASK 0x03
-#define TPS80031_TRANS_SLEEP_OFF 0x00
-#define TPS80031_TRANS_SLEEP_ON 0x04
-#define TPS80031_TRANS_SLEEP_MASK 0x0C
-#define TPS80031_TRANS_OFF_OFF 0x00
-#define TPS80031_TRANS_OFF_ACTIVE 0x10
-#define TPS80031_TRANS_OFF_MASK 0x30
-
-#define TPS80031_EXT_PWR_REQ (TPS80031_PWR_REQ_INPUT_PREQ1 | \
- TPS80031_PWR_REQ_INPUT_PREQ2 | \
- TPS80031_PWR_REQ_INPUT_PREQ3)
-
-/* TPS80031_BBSPOR_CFG bit field */
-#define TPS80031_BBSPOR_CHG_EN 0x8
-#define TPS80031_MAX_REGISTER 0xFF
-
-struct i2c_client;
-
-/* Supported chips */
-enum chips {
- TPS80031 = 0x00000001,
- TPS80032 = 0x00000002,
-};
-
-enum {
- TPS80031_INT_PWRON,
- TPS80031_INT_RPWRON,
- TPS80031_INT_SYS_VLOW,
- TPS80031_INT_RTC_ALARM,
- TPS80031_INT_RTC_PERIOD,
- TPS80031_INT_HOT_DIE,
- TPS80031_INT_VXX_SHORT,
- TPS80031_INT_SPDURATION,
- TPS80031_INT_WATCHDOG,
- TPS80031_INT_BAT,
- TPS80031_INT_SIM,
- TPS80031_INT_MMC,
- TPS80031_INT_RES,
- TPS80031_INT_GPADC_RT,
- TPS80031_INT_GPADC_SW2_EOC,
- TPS80031_INT_CC_AUTOCAL,
- TPS80031_INT_ID_WKUP,
- TPS80031_INT_VBUSS_WKUP,
- TPS80031_INT_ID,
- TPS80031_INT_VBUS,
- TPS80031_INT_CHRG_CTRL,
- TPS80031_INT_EXT_CHRG,
- TPS80031_INT_INT_CHRG,
- TPS80031_INT_RES2,
- TPS80031_INT_BAT_TEMP_OVRANGE,
- TPS80031_INT_BAT_REMOVED,
- TPS80031_INT_VBUS_DET,
- TPS80031_INT_VAC_DET,
- TPS80031_INT_FAULT_WDG,
- TPS80031_INT_LINCH_GATED,
-
- /* Last interrupt id to get the end number */
- TPS80031_INT_NR,
-};
-
-/* TPS80031 Slave IDs */
-#define TPS80031_NUM_SLAVES 4
-#define TPS80031_SLAVE_ID0 0
-#define TPS80031_SLAVE_ID1 1
-#define TPS80031_SLAVE_ID2 2
-#define TPS80031_SLAVE_ID3 3
-
-/* TPS80031 I2C addresses */
-#define TPS80031_I2C_ID0_ADDR 0x12
-#define TPS80031_I2C_ID1_ADDR 0x48
-#define TPS80031_I2C_ID2_ADDR 0x49
-#define TPS80031_I2C_ID3_ADDR 0x4A
-
-enum {
- TPS80031_REGULATOR_VIO,
- TPS80031_REGULATOR_SMPS1,
- TPS80031_REGULATOR_SMPS2,
- TPS80031_REGULATOR_SMPS3,
- TPS80031_REGULATOR_SMPS4,
- TPS80031_REGULATOR_VANA,
- TPS80031_REGULATOR_LDO1,
- TPS80031_REGULATOR_LDO2,
- TPS80031_REGULATOR_LDO3,
- TPS80031_REGULATOR_LDO4,
- TPS80031_REGULATOR_LDO5,
- TPS80031_REGULATOR_LDO6,
- TPS80031_REGULATOR_LDO7,
- TPS80031_REGULATOR_LDOLN,
- TPS80031_REGULATOR_LDOUSB,
- TPS80031_REGULATOR_VBUS,
- TPS80031_REGULATOR_REGEN1,
- TPS80031_REGULATOR_REGEN2,
- TPS80031_REGULATOR_SYSEN,
- TPS80031_REGULATOR_MAX,
-};
-
-/* Different configurations for the rails */
-enum {
- /* USBLDO input selection */
- TPS80031_USBLDO_INPUT_VSYS = 0x00000001,
- TPS80031_USBLDO_INPUT_PMID = 0x00000002,
-
- /* LDO3 output mode */
- TPS80031_LDO3_OUTPUT_VIB = 0x00000004,
-
- /* VBUS configuration */
- TPS80031_VBUS_DISCHRG_EN_PDN = 0x00000004,
- TPS80031_VBUS_SW_ONLY = 0x00000008,
- TPS80031_VBUS_SW_N_ID = 0x00000010,
-};
-
-/* External controls requests */
-enum tps80031_ext_control {
- TPS80031_PWR_REQ_INPUT_NONE = 0x00000000,
- TPS80031_PWR_REQ_INPUT_PREQ1 = 0x00000001,
- TPS80031_PWR_REQ_INPUT_PREQ2 = 0x00000002,
- TPS80031_PWR_REQ_INPUT_PREQ3 = 0x00000004,
- TPS80031_PWR_OFF_ON_SLEEP = 0x00000008,
- TPS80031_PWR_ON_ON_SLEEP = 0x00000010,
-};
-
-enum tps80031_pupd_pins {
- TPS80031_PREQ1 = 0,
- TPS80031_PREQ2A,
- TPS80031_PREQ2B,
- TPS80031_PREQ2C,
- TPS80031_PREQ3,
- TPS80031_NRES_WARM,
- TPS80031_PWM_FORCE,
- TPS80031_CHRG_EXT_CHRG_STATZ,
- TPS80031_SIM,
- TPS80031_MMC,
- TPS80031_GPADC_START,
- TPS80031_DVSI2C_SCL,
- TPS80031_DVSI2C_SDA,
- TPS80031_CTLI2C_SCL,
- TPS80031_CTLI2C_SDA,
-};
-
-enum tps80031_pupd_settings {
- TPS80031_PUPD_NORMAL,
- TPS80031_PUPD_PULLDOWN,
- TPS80031_PUPD_PULLUP,
-};
-
-struct tps80031 {
- struct device *dev;
- unsigned long chip_info;
- int es_version;
- struct i2c_client *clients[TPS80031_NUM_SLAVES];
- struct regmap *regmap[TPS80031_NUM_SLAVES];
- struct regmap_irq_chip_data *irq_data;
-};
-
-struct tps80031_pupd_init_data {
- int input_pin;
- int setting;
-};
-
-/*
- * struct tps80031_regulator_platform_data - tps80031 regulator platform data.
- *
- * @reg_init_data: The regulator init data.
- * @ext_ctrl_flag: External control flag for sleep/power request control.
- * @config_flags: Configuration flag to configure the rails.
- * It should be ORed of config enums.
- */
-
-struct tps80031_regulator_platform_data {
- struct regulator_init_data *reg_init_data;
- unsigned int ext_ctrl_flag;
- unsigned int config_flags;
-};
-
-struct tps80031_platform_data {
- int irq_base;
- bool use_power_off;
- struct tps80031_pupd_init_data *pupd_init_data;
- int pupd_init_data_size;
- struct tps80031_regulator_platform_data
- *regulator_pdata[TPS80031_REGULATOR_MAX];
-};
-
-static inline int tps80031_write(struct device *dev, int sid,
- int reg, uint8_t val)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_write(tps80031->regmap[sid], reg, val);
-}
-
-static inline int tps80031_writes(struct device *dev, int sid, int reg,
- int len, uint8_t *val)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_bulk_write(tps80031->regmap[sid], reg, val, len);
-}
-
-static inline int tps80031_read(struct device *dev, int sid,
- int reg, uint8_t *val)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
- unsigned int ival;
- int ret;
-
- ret = regmap_read(tps80031->regmap[sid], reg, &ival);
- if (ret < 0) {
- dev_err(dev, "failed reading from reg 0x%02x\n", reg);
- return ret;
- }
-
- *val = ival;
- return ret;
-}
-
-static inline int tps80031_reads(struct device *dev, int sid,
- int reg, int len, uint8_t *val)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_bulk_read(tps80031->regmap[sid], reg, val, len);
-}
-
-static inline int tps80031_set_bits(struct device *dev, int sid,
- int reg, uint8_t bit_mask)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_update_bits(tps80031->regmap[sid], reg,
- bit_mask, bit_mask);
-}
-
-static inline int tps80031_clr_bits(struct device *dev, int sid,
- int reg, uint8_t bit_mask)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_update_bits(tps80031->regmap[sid], reg, bit_mask, 0);
-}
-
-static inline int tps80031_update(struct device *dev, int sid,
- int reg, uint8_t val, uint8_t mask)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_update_bits(tps80031->regmap[sid], reg, mask, val);
-}
-
-static inline unsigned long tps80031_get_chip_info(struct device *dev)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return tps80031->chip_info;
-}
-
-static inline int tps80031_get_pmu_version(struct device *dev)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return tps80031->es_version;
-}
-
-static inline int tps80031_irq_get_virq(struct device *dev, int irq)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_irq_get_virq(tps80031->irq_data, irq);
-}
-
-extern int tps80031_ext_power_req_config(struct device *dev,
- unsigned long ext_ctrl_flag, int preq_bit,
- int state_reg_add, int trans_reg_add);
-#endif /*__LINUX_MFD_TPS80031_H */
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 3d43c60b49fa..1f7c33b2f5a3 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -28,6 +28,7 @@
#define PHY_ID_KSZ9031 0x00221620
#define PHY_ID_KSZ9131 0x00221640
#define PHY_ID_LAN8814 0x00221660
+#define PHY_ID_LAN8804 0x00221670
#define PHY_ID_KSZ886X 0x00221430
#define PHY_ID_KSZ8863 0x00221435
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index c8077e936691..4850cc5bf813 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -19,24 +19,7 @@ struct migration_target_control;
*/
#define MIGRATEPAGE_SUCCESS 0
-/*
- * Keep sync with:
- * - macro MIGRATE_REASON in include/trace/events/migrate.h
- * - migrate_reason_names[MR_TYPES] in mm/debug.c
- */
-enum migrate_reason {
- MR_COMPACTION,
- MR_MEMORY_FAILURE,
- MR_MEMORY_HOTPLUG,
- MR_SYSCALL, /* also applies to cpusets */
- MR_MEMPOLICY_MBIND,
- MR_NUMA_MISPLACED,
- MR_CONTIG_RANGE,
- MR_LONGTERM_PIN,
- MR_DEMOTION,
- MR_TYPES
-};
-
+/* Defined in mm/debug.c: */
extern const char *migrate_reason_names[MR_TYPES];
#ifdef CONFIG_MIGRATION
@@ -57,6 +40,12 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page, int extra_count);
+void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
+void folio_migrate_copy(struct folio *newfolio, struct folio *folio);
+int folio_migrate_mapping(struct address_space *mapping,
+ struct folio *newfolio, struct folio *folio, int extra_count);
+
+extern bool numa_demotion_enabled;
#else
static inline void putback_movable_pages(struct list_head *l) {}
@@ -82,6 +71,8 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
{
return -ENOSYS;
}
+
+#define numa_demotion_enabled false
#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_COMPACTION
@@ -119,7 +110,6 @@ static inline int migrate_misplaced_page(struct page *page,
*/
#define MIGRATE_PFN_VALID (1UL << 0)
#define MIGRATE_PFN_MIGRATE (1UL << 1)
-#define MIGRATE_PFN_LOCKED (1UL << 2)
#define MIGRATE_PFN_WRITE (1UL << 3)
#define MIGRATE_PFN_SHIFT 6
diff --git a/include/linux/migrate_mode.h b/include/linux/migrate_mode.h
index 883c99249033..f37cc03f9369 100644
--- a/include/linux/migrate_mode.h
+++ b/include/linux/migrate_mode.h
@@ -19,4 +19,17 @@ enum migrate_mode {
MIGRATE_SYNC_NO_COPY,
};
+enum migrate_reason {
+ MR_COMPACTION,
+ MR_MEMORY_FAILURE,
+ MR_MEMORY_HOTPLUG,
+ MR_SYSCALL, /* also applies to cpusets */
+ MR_MEMPOLICY_MBIND,
+ MR_NUMA_MISPLACED,
+ MR_CONTIG_RANGE,
+ MR_LONGTERM_PIN,
+ MR_DEMOTION,
+ MR_TYPES
+};
+
#endif /* MIGRATE_MODE_H_INCLUDED */
diff --git a/include/linux/misc_cgroup.h b/include/linux/misc_cgroup.h
index da2367e2ac1e..c238207d1615 100644
--- a/include/linux/misc_cgroup.h
+++ b/include/linux/misc_cgroup.h
@@ -36,7 +36,7 @@ struct misc_cg;
struct misc_res {
unsigned long max;
atomic_long_t usage;
- bool failed;
+ atomic_long_t events;
};
/**
@@ -46,6 +46,10 @@ struct misc_res {
*/
struct misc_cg {
struct cgroup_subsys_state css;
+
+ /* misc.events */
+ struct cgroup_file events_file;
+
struct misc_res res[MISC_CG_RES_TYPES];
};
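Annotation: the consumer side of the new fields lives in kernel/cgroup/misc.c, not in this header; a hedged sketch of what the producer path presumably looks like on a charge failure (example_report_charge_failure is an illustrative name):

static void example_report_charge_failure(struct misc_cg *cg,
					  enum misc_res_type type)
{
	/* replaces the old one-shot 'failed' flag with a running count */
	atomic_long_inc(&cg->res[type].events);
	/* wake up anyone poll()ing the misc.events control file */
	cgroup_file_notify(&cg->events_file);
}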
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 30bb59fe970c..6646634a0b9d 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -1436,7 +1436,7 @@ int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
enum mlx4_net_trans_rule_id id);
int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);
-int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, const unsigned char *addr,
int port, int qpn, u16 prio, u64 *reg_id);
void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index a858bcb6220b..1834c8fad12e 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -92,26 +92,4 @@ void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int
struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port);
-static inline u64 mlx4_mac_to_u64(u8 *addr)
-{
- u64 mac = 0;
- int i;
-
- for (i = 0; i < ETH_ALEN; i++) {
- mac <<= 8;
- mac |= addr[i];
- }
- return mac;
-}
-
-static inline void mlx4_u64_to_mac(u8 *addr, u64 mac)
-{
- int i;
-
- for (i = ETH_ALEN; i > 0; i--) {
- addr[i - 1] = mac & 0xFF;
- mac >>= 8;
- }
-}
-
#endif /* MLX4_DRIVER_H */
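Annotation: the removed helpers duplicated generic ones, and callers were presumably switched to the equivalents in <linux/etherdevice.h>. A sketch of the replacement (example_mac_roundtrip is illustrative):

#include <linux/etherdevice.h>

static inline u64 example_mac_roundtrip(u8 *addr)
{
	u64 mac = ether_addr_to_u64(addr);	/* was mlx4_mac_to_u64() */

	u64_to_ether_addr(mac, addr);		/* was mlx4_u64_to_mac() */
	return mac;
}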
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 66eaf0aa7f69..9c25edfd59a6 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -290,6 +290,7 @@ enum {
MLX5_UMR_INLINE = (1 << 7),
};
+#define MLX5_UMR_KLM_ALIGNMENT 4
#define MLX5_UMR_MTT_ALIGNMENT 0x40
#define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1)
#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
@@ -541,19 +542,21 @@ struct mlx5_cmd_layout {
u8 status_own;
};
-enum mlx5_fatal_assert_bit_offsets {
- MLX5_RFR_OFFSET = 31,
+enum mlx5_rfr_severity_bit_offsets {
+ MLX5_RFR_BIT_OFFSET = 0x7,
};
struct health_buffer {
- __be32 assert_var[5];
- __be32 rsvd0[3];
+ __be32 assert_var[6];
+ __be32 rsvd0[2];
__be32 assert_exit_ptr;
__be32 assert_callra;
- __be32 rsvd1[2];
+ __be32 rsvd1[1];
+ __be32 time;
__be32 fw_ver;
__be32 hw_id;
- __be32 rfr;
+ u8 rfr_severity;
+ u8 rsvd2[3];
u8 irisc_index;
u8 synd;
__be16 ext_synd;
@@ -577,7 +580,9 @@ struct mlx5_init_seg {
__be32 rsvd1[120];
__be32 initializing;
struct health_buffer health;
- __be32 rsvd2[880];
+ __be32 rsvd2[878];
+ __be32 cmd_exec_to;
+ __be32 cmd_q_init_to;
__be32 internal_timer_h;
__be32 internal_timer_l;
__be32 rsvd3[2];
@@ -795,10 +800,23 @@ struct mlx5_cqe64 {
u8 tls_outer_l3_tunneled;
u8 rsvd0;
__be16 wqe_id;
- u8 lro_tcppsh_abort_dupack;
- u8 lro_min_ttl;
- __be16 lro_tcp_win;
- __be32 lro_ack_seq_num;
+ union {
+ struct {
+ u8 tcppsh_abort_dupack;
+ u8 min_ttl;
+ __be16 tcp_win;
+ __be32 ack_seq_num;
+ } lro;
+ struct {
+ u8 reserved0:1;
+ u8 match:1;
+ u8 flush:1;
+ u8 reserved3:5;
+ u8 header_size;
+ __be16 header_entry_index;
+ __be32 data_offset;
+ } shampo;
+ };
__be32 rss_hash_result;
u8 rss_hash_type;
u8 ml_path;
@@ -868,7 +886,7 @@ static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)
static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
{
- return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
+ return (cqe->lro.tcppsh_abort_dupack >> 6) & 1;
}
static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
@@ -1182,7 +1200,9 @@ enum mlx5_cap_type {
MLX5_CAP_VDPA_EMULATION = 0x13,
MLX5_CAP_DEV_EVENT = 0x14,
MLX5_CAP_IPSEC,
+ MLX5_CAP_DEV_SHAMPO = 0x1d,
MLX5_CAP_GENERAL_2 = 0x20,
+ MLX5_CAP_PORT_SELECTION = 0x25,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -1340,6 +1360,20 @@ enum mlx5_qcam_feature_groups {
MLX5_GET(e_switch_cap, \
mdev->caps.hca[MLX5_CAP_ESWITCH]->max, cap)
+#define MLX5_CAP_PORT_SELECTION(mdev, cap) \
+ MLX5_GET(port_selection_cap, \
+ mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->cur, cap)
+
+#define MLX5_CAP_PORT_SELECTION_MAX(mdev, cap) \
+ MLX5_GET(port_selection_cap, \
+ mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->max, cap)
+
+#define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \
+ MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap)
+
+#define MLX5_CAP_FLOWTABLE_PORT_SELECTION_MAX(mdev, cap) \
+ MLX5_CAP_PORT_SELECTION_MAX(mdev, flow_table_properties_port_selection.cap)
+
#define MLX5_CAP_ODP(mdev, cap)\
MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)
@@ -1412,6 +1446,9 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_IPSEC(mdev, cap)\
MLX5_GET(ipsec_cap, (mdev)->caps.hca[MLX5_CAP_IPSEC]->cur, cap)
+#define MLX5_CAP_DEV_SHAMPO(mdev, cap)\
+ MLX5_GET(shampo_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_SHAMPO], cap)
+
enum {
MLX5_CMD_STAT_OK = 0x0,
MLX5_CMD_STAT_INT_ERR = 0x1,
@@ -1456,6 +1493,8 @@ static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}
+#define MLX5_RDMA_RX_NUM_COUNTERS_PRIOS 2
+#define MLX5_RDMA_TX_NUM_COUNTERS_PRIOS 1
#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16
#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16
#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
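Annotation: a hedged sketch of how a driver might gate SHAMPO setup on the new capabilities (example_shampo_usable is an illustrative name; the general-cap 'shampo' bit is added to cmd_hca_cap in the mlx5_ifc.h hunks further down):

static bool example_shampo_usable(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_GEN(mdev, shampo) &&
	       MLX5_CAP_DEV_SHAMPO(mdev, shampo_log_max_reservation_size);
}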
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index e23417424373..a623ec635947 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -59,15 +59,13 @@
#define MLX5_ADEV_NAME "mlx5_core"
+#define MLX5_IRQ_EQ_CTRL (U8_MAX)
+
enum {
MLX5_BOARD_ID_LEN = 64,
};
enum {
- /* one minute for the sake of bringup. Generally, commands must always
- * complete and we may need to increase this timeout value
- */
- MLX5_CMD_TIMEOUT_MSEC = 60 * 1000,
MLX5_CMD_WQ_MAX_NAME = 32,
};
@@ -136,6 +134,7 @@ enum {
MLX5_REG_MCIA = 0x9014,
MLX5_REG_MFRL = 0x9028,
MLX5_REG_MLCR = 0x902b,
+ MLX5_REG_MRTC = 0x902d,
MLX5_REG_MTRC_CAP = 0x9040,
MLX5_REG_MTRC_CONF = 0x9041,
MLX5_REG_MTRC_STDB = 0x9042,
@@ -154,6 +153,7 @@ enum {
MLX5_REG_MIRC = 0x9162,
MLX5_REG_SBCAM = 0xB01F,
MLX5_REG_RESOURCE_DUMP = 0xC000,
+ MLX5_REG_DTOR = 0xC00E,
};
enum mlx5_qpts_trust_state {
@@ -357,22 +357,6 @@ struct mlx5_core_sig_ctx {
u32 sigerr_count;
};
-enum {
- MLX5_MKEY_MR = 1,
- MLX5_MKEY_MW,
- MLX5_MKEY_INDIRECT_DEVX,
-};
-
-struct mlx5_core_mkey {
- u64 iova;
- u64 size;
- u32 key;
- u32 pd;
- u32 type;
- struct wait_queue_head wait;
- refcount_t usecount;
-};
-
#define MLX5_24BIT_MASK ((1 << 24) - 1)
enum mlx5_res_type {
@@ -441,6 +425,7 @@ struct mlx5_core_health {
struct work_struct report_work;
struct devlink_health_reporter *fw_reporter;
struct devlink_health_reporter *fw_fatal_reporter;
+ struct delayed_work update_fw_log_ts_work;
};
struct mlx5_qp_table {
@@ -653,7 +638,7 @@ struct mlx5e_resources {
struct mlx5e_hw_objs {
u32 pdn;
struct mlx5_td td;
- struct mlx5_core_mkey mkey;
+ u32 mkey;
struct mlx5_sq_bfreg bfreg;
} hw_objs;
struct devlink_port dl_port;
@@ -752,6 +737,7 @@ struct mlx5_core_dev {
u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
u8 embedded_cpu;
} caps;
+ struct mlx5_timeouts *timeouts;
u64 sys_image_guid;
phys_addr_t iseg_base;
struct mlx5_init_seg __iomem *iseg;
@@ -1005,8 +991,6 @@ void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
-int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
-int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
void mlx5_health_flush(struct mlx5_core_dev *dev);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
@@ -1024,13 +1008,11 @@ struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
struct mlx5_cmd_mailbox *head);
-int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey,
- u32 *in, int inlen);
-int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey);
-int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
- u32 *out, int outlen);
+int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in,
+ int inlen);
+int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, u32 mkey);
+int mlx5_core_query_mkey(struct mlx5_core_dev *dev, u32 mkey, u32 *out,
+ int outlen);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
@@ -1138,7 +1120,6 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
-bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
bool mlx5_lag_is_master(struct mlx5_core_dev *dev);
bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev);
@@ -1243,6 +1224,16 @@ static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev)
return MLX5_CAP_GEN(dev, native_port_num);
}
+static inline int mlx5_get_dev_index(struct mlx5_core_dev *dev)
+{
+ int idx = MLX5_CAP_GEN(dev, native_port_num);
+
+ if (idx >= 1 && idx <= MLX5_MAX_PORTS)
+ return idx - 1;
+ else
+ return PCI_FUNC(dev->pdev->devfn);
+}
+
enum {
MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
@@ -1251,11 +1242,12 @@ static inline bool mlx5_is_roce_init_enabled(struct mlx5_core_dev *dev)
{
struct devlink *devlink = priv_to_devlink(dev);
union devlink_param_value val;
+ int err;
- devlink_param_driverinit_value_get(devlink,
- DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
- &val);
- return val.vbool;
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ &val);
+ return err ? MLX5_CAP_GEN(dev, roce) : val.vbool;
}
#endif /* MLX5_DRIVER_H */
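Annotation: seen from a caller, the mkey rework reduces to passing a bare u32 instead of a struct mlx5_core_mkey. A minimal sketch using only the signatures declared above (example_with_mkey is illustrative):

static int example_with_mkey(struct mlx5_core_dev *dev, u32 *in, int inlen)
{
	u32 mkey;
	int err;

	err = mlx5_core_create_mkey(dev, &mkey, in, inlen);
	if (err)
		return err;
	/* ... program queues and WQs with 'mkey' ... */
	return mlx5_core_destroy_mkey(dev, mkey);
}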
diff --git a/include/linux/mlx5/eq.h b/include/linux/mlx5/eq.h
index cea6ecb4b73e..ea3ff5a8ced3 100644
--- a/include/linux/mlx5/eq.h
+++ b/include/linux/mlx5/eq.h
@@ -4,7 +4,6 @@
#ifndef MLX5_CORE_EQ_H
#define MLX5_CORE_EQ_H
-#define MLX5_IRQ_VEC_COMP_BASE 1
#define MLX5_NUM_CMD_EQE (32)
#define MLX5_NUM_ASYNC_EQE (0x1000)
#define MLX5_NUM_SPARE_EQE (0x80)
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index 4ab5c1fc1270..97afcea39a7b 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -130,11 +130,20 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
#define ESW_TUN_OPTS_MASK GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, ESW_TUN_OPTS_OFFSET)
#define ESW_TUN_MASK GENMASK(31 - ESW_RESERVED_BITS, ESW_TUN_OFFSET)
#define ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT 0 /* 0 is not a valid tunnel id */
+#define ESW_TUN_ID_BRIDGE_INGRESS_PUSH_VLAN ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT
/* 0x7FF is a reserved mapping */
#define ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT GENMASK(ESW_TUN_OPTS_BITS - 1, 0)
#define ESW_TUN_SLOW_TABLE_GOTO_VPORT ((ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT << ESW_TUN_OPTS_BITS) | \
ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT)
#define ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK ESW_TUN_OPTS_MASK
+/* 0x7FE is a reserved mapping for bridge ingress push vlan mark */
+#define ESW_TUN_OPTS_BRIDGE_INGRESS_PUSH_VLAN (ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT - 1)
+#define ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN ((ESW_TUN_ID_BRIDGE_INGRESS_PUSH_VLAN << \
+ ESW_TUN_OPTS_BITS) | \
+ ESW_TUN_OPTS_BRIDGE_INGRESS_PUSH_VLAN)
+#define ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK \
+ GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, \
+ ESW_TUN_OPTS_OFFSET + 1)
u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev);
u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 0106c67e8ccb..cd2d4c572367 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -83,6 +83,9 @@ enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_RDMA_RX,
MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL,
MLX5_FLOW_NAMESPACE_RDMA_TX,
+ MLX5_FLOW_NAMESPACE_PORT_SEL,
+ MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS,
+ MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS,
};
enum {
@@ -97,6 +100,7 @@ enum {
struct mlx5_pkt_reformat;
struct mlx5_modify_hdr;
+struct mlx5_flow_definer;
struct mlx5_flow_table;
struct mlx5_flow_group;
struct mlx5_flow_namespace;
@@ -241,6 +245,10 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
struct mlx5_flow_destination *old_dest);
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
+
+/* Like mlx5_fc_create(), but doesn't queue the stats refresh thread. */
+struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging);
+
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
@@ -257,6 +265,13 @@ struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
void *modify_actions);
void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
struct mlx5_modify_hdr *modify_hdr);
+struct mlx5_flow_definer *
+mlx5_create_match_definer(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type ns_type, u16 format_id,
+ u32 *match_mask);
+void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
+ struct mlx5_flow_definer *definer);
+int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer);
struct mlx5_pkt_reformat_params {
int type;
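Annotation: a hypothetical call sequence for the new match-definer API declared above; format_id and match_mask stand in for device-specific values, and the ERR_PTR convention is assumed, so treat this as a sketch rather than the driver's actual flow.

static int example_use_definer(struct mlx5_core_dev *dev, u16 format_id,
			       u32 *match_mask)
{
	struct mlx5_flow_definer *definer;
	int id;

	definer = mlx5_create_match_definer(dev, MLX5_FLOW_NAMESPACE_PORT_SEL,
					    format_id, match_mask);
	if (IS_ERR(definer))
		return PTR_ERR(definer);

	id = mlx5_get_match_definer_id(definer);
	/* ... reference 'id' from steering objects ... */
	mlx5_destroy_match_definer(dev, definer);
	return id;
}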
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 993204a6c1a1..3636df90899a 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -94,6 +94,7 @@ enum {
enum {
MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b,
MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d,
+ MLX5_OBJ_TYPE_MATCH_DEFINER = 0x0018,
MLX5_OBJ_TYPE_MKEY = 0xff01,
MLX5_OBJ_TYPE_QP = 0xff02,
MLX5_OBJ_TYPE_PSV = 0xff03,
@@ -342,7 +343,7 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 outer_geneve_oam[0x1];
u8 outer_geneve_protocol_type[0x1];
u8 outer_geneve_opt_len[0x1];
- u8 reserved_at_1e[0x1];
+ u8 source_vhca_port[0x1];
u8 source_eswitch_port[0x1];
u8 inner_dmac[0x1];
@@ -393,6 +394,14 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 metadata_reg_c_0[0x1];
};
+struct mlx5_ifc_flow_table_fields_supported_2_bits {
+ u8 reserved_at_0[0xe];
+ u8 bth_opcode[0x1];
+ u8 reserved_at_f[0x11];
+
+ u8 reserved_at_20[0x60];
+};
+
struct mlx5_ifc_flow_table_prop_layout_bits {
u8 ft_support[0x1];
u8 reserved_at_1[0x1];
@@ -539,7 +548,7 @@ struct mlx5_ifc_fte_match_set_misc_bits {
union mlx5_ifc_gre_key_bits gre_key;
u8 vxlan_vni[0x18];
- u8 reserved_at_b8[0x8];
+ u8 bth_opcode[0x8];
u8 geneve_vni[0x18];
u8 reserved_at_d8[0x7];
@@ -756,7 +765,15 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
- u8 reserved_at_e00[0x1200];
+ u8 reserved_at_e00[0x700];
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_receive_rdma;
+
+ u8 reserved_at_1580[0x280];
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_transmit_rdma;
+
+ u8 reserved_at_1880[0x780];
u8 sw_steering_nic_rx_action_drop_icm_address[0x40];
@@ -767,6 +784,18 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
u8 reserved_at_20c0[0x5f40];
};
+struct mlx5_ifc_port_selection_cap_bits {
+ u8 reserved_at_0[0x10];
+ u8 port_select_flow_table[0x1];
+ u8 reserved_at_11[0xf];
+
+ u8 reserved_at_20[0x1e0];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_port_selection;
+
+ u8 reserved_at_400[0x7c00];
+};
+
enum {
MLX5_FDB_TO_VPORT_REG_C_0 = 0x01,
MLX5_FDB_TO_VPORT_REG_C_1 = 0x02,
@@ -1306,7 +1335,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 vhca_resource_manager[0x1];
u8 hca_cap_2[0x1];
- u8 reserved_at_21[0x2];
+ u8 reserved_at_21[0x1];
+ u8 dtor[0x1];
u8 event_on_vhca_state_teardown_request[0x1];
u8 event_on_vhca_state_in_use[0x1];
u8 event_on_vhca_state_active[0x1];
@@ -1336,7 +1366,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_b0[0x1];
u8 uplink_follow[0x1];
u8 ts_cqe_to_dest_cqn[0x1];
- u8 reserved_at_b3[0xd];
+ u8 reserved_at_b3[0x7];
+ u8 shampo[0x1];
+ u8 reserved_at_bb[0x5];
u8 max_sgl_for_optimized_performance[0x8];
u8 log_max_cq_sz[0x8];
@@ -1514,7 +1546,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 uar_4k[0x1];
u8 reserved_at_241[0x9];
u8 uar_sz[0x6];
- u8 reserved_at_248[0x2];
+ u8 port_selection_cap[0x1];
+ u8 reserved_at_248[0x1];
u8 umem_uid_0[0x1];
u8 reserved_at_250[0x5];
u8 log_pg_sz[0x8];
@@ -1587,7 +1620,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_tis_per_sq[0x5];
u8 ext_stride_num_range[0x1];
- u8 reserved_at_3a1[0x2];
+ u8 roce_rw_supported[0x1];
+ u8 reserved_at_3a2[0x1];
u8 log_max_stride_sz_rq[0x5];
u8 reserved_at_3a8[0x3];
u8 log_min_stride_sz_rq[0x5];
@@ -1716,7 +1750,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 flex_parser_id_outer_first_mpls_over_gre[0x4];
u8 flex_parser_id_outer_first_mpls_over_udp_label[0x4];
- u8 reserved_at_6e0[0x10];
+ u8 max_num_match_definer[0x10];
u8 sf_base_id[0x10];
u8 flex_parser_id_gtpu_dw_2[0x4];
@@ -1731,7 +1765,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_760[0x20];
u8 vhca_tunnel_commands[0x40];
- u8 reserved_at_7c0[0x40];
+ u8 match_definer_format_supported[0x40];
};
struct mlx5_ifc_cmd_hca_cap_2_bits {
@@ -1750,6 +1784,7 @@ enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER = 0x6,
+ MLX5_FLOW_DESTINATION_TYPE_UPLINK = 0x8,
MLX5_FLOW_DESTINATION_TYPE_PORT = 0x99,
MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100,
@@ -1876,7 +1911,21 @@ struct mlx5_ifc_wq_bits {
u8 reserved_at_139[0x4];
u8 log_wqe_stride_size[0x3];
- u8 reserved_at_140[0x4c0];
+ u8 reserved_at_140[0x80];
+
+ u8 headers_mkey[0x20];
+
+ u8 shampo_enable[0x1];
+ u8 reserved_at_1e1[0x4];
+ u8 log_reservation_size[0x3];
+ u8 reserved_at_1e8[0x5];
+ u8 log_max_num_of_packets_per_reservation[0x3];
+ u8 reserved_at_1f0[0x6];
+ u8 log_headers_entry_size[0x2];
+ u8 reserved_at_1f8[0x4];
+ u8 log_headers_buffer_entry_num[0x4];
+
+ u8 reserved_at_200[0x400];
struct mlx5_ifc_cmd_pas_bits pas[];
};
@@ -2807,6 +2856,40 @@ struct mlx5_ifc_dropped_packet_logged_bits {
u8 reserved_at_0[0xe0];
};
+struct mlx5_ifc_default_timeout_bits {
+ u8 to_multiplier[0x3];
+ u8 reserved_at_3[0x9];
+ u8 to_value[0x14];
+};
+
+struct mlx5_ifc_dtor_reg_bits {
+ u8 reserved_at_0[0x20];
+
+ struct mlx5_ifc_default_timeout_bits pcie_toggle_to;
+
+ u8 reserved_at_40[0x60];
+
+ struct mlx5_ifc_default_timeout_bits health_poll_to;
+
+ struct mlx5_ifc_default_timeout_bits full_crdump_to;
+
+ struct mlx5_ifc_default_timeout_bits fw_reset_to;
+
+ struct mlx5_ifc_default_timeout_bits flush_on_err_to;
+
+ struct mlx5_ifc_default_timeout_bits pci_sync_update_to;
+
+ struct mlx5_ifc_default_timeout_bits tear_down_to;
+
+ struct mlx5_ifc_default_timeout_bits fsm_reactivate_to;
+
+ struct mlx5_ifc_default_timeout_bits reclaim_pages_to;
+
+ struct mlx5_ifc_default_timeout_bits reclaim_vfs_pages_to;
+
+ u8 reserved_at_1c0[0x40];
+};
+
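For context, a minimal sketch of decoding one of these timeout fields after querying the DTOR register. MLX5_ADDR_OF()/MLX5_GET() are the standard accessors for these _bits layouts; the function name and the to_ms() unit conversion are hypothetical:

static u64 dtor_pcie_toggle_timeout_ms(void *dtor_out)
{
	/* Point at the nested default_timeout field inside dtor_reg. */
	void *field = MLX5_ADDR_OF(dtor_reg, dtor_out, pcie_toggle_to);
	u32 multiplier = MLX5_GET(default_timeout, field, to_multiplier);
	u32 value = MLX5_GET(default_timeout, field, to_value);

	/* to_multiplier selects the time unit applied to to_value;
	 * to_ms() is an assumed, firmware-defined scaling helper. */
	return to_ms(multiplier, value);
}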
enum {
MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1,
MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2,
@@ -3118,6 +3201,20 @@ struct mlx5_ifc_roce_addr_layout_bits {
u8 reserved_at_e0[0x20];
};
+struct mlx5_ifc_shampo_cap_bits {
+ u8 reserved_at_0[0x3];
+ u8 shampo_log_max_reservation_size[0x5];
+ u8 reserved_at_8[0x3];
+ u8 shampo_log_min_reservation_size[0x5];
+ u8 shampo_min_mss_size[0x10];
+
+ u8 reserved_at_20[0x3];
+ u8 shampo_max_log_headers_entry_size[0x5];
+ u8 reserved_at_28[0x18];
+
+ u8 reserved_at_40[0x7c0];
+};
+
union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
struct mlx5_ifc_cmd_hca_cap_2_bits cmd_hca_cap_2;
@@ -3128,6 +3225,7 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
+ struct mlx5_ifc_port_selection_cap_bits port_selection_cap;
struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap;
struct mlx5_ifc_qos_cap_bits qos_cap;
struct mlx5_ifc_debug_cap_bits debug_cap;
@@ -3135,6 +3233,7 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_tls_cap_bits tls_cap;
struct mlx5_ifc_device_mem_cap_bits device_mem_cap;
struct mlx5_ifc_virtio_emulation_cap_bits virtio_emulation_cap;
+ struct mlx5_ifc_shampo_cap_bits shampo_cap;
u8 reserved_at_0[0x8000];
};
@@ -3309,8 +3408,9 @@ enum {
};
enum {
- MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1,
- MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2,
+ MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO = BIT(0),
+ MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO = BIT(1),
+ MLX5_TIRC_PACKET_MERGE_MASK_SHAMPO = BIT(2),
};
enum {
@@ -3335,7 +3435,7 @@ struct mlx5_ifc_tirc_bits {
u8 reserved_at_80[0x4];
u8 lro_timeout_period_usecs[0x10];
- u8 lro_enable_mask[0x4];
+ u8 packet_merge_mask[0x4];
u8 lro_max_ip_payload_size[0x8];
u8 reserved_at_a0[0x40];
@@ -3517,6 +3617,18 @@ enum {
MLX5_RQC_STATE_ERR = 0x3,
};
+enum {
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_BYTE = 0x0,
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE = 0x1,
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_PAGE = 0x2,
+};
+
+enum {
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_NO_MATCH = 0x0,
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED = 0x1,
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_FIVE_TUPLE = 0x2,
+};
+
struct mlx5_ifc_rqc_bits {
u8 rlky[0x1];
u8 delay_drop_en[0x1];
@@ -3549,7 +3661,13 @@ struct mlx5_ifc_rqc_bits {
u8 reserved_at_c0[0x10];
u8 hairpin_peer_vhca[0x10];
- u8 reserved_at_e0[0xa0];
+ u8 reserved_at_e0[0x46];
+ u8 shampo_no_match_alignment_granularity[0x2];
+ u8 reserved_at_128[0x6];
+ u8 shampo_match_criteria_type[0x2];
+ u8 reservation_timeout[0x10];
+
+ u8 reserved_at_140[0x40];
struct mlx5_ifc_wq_bits wq;
};
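For context, a hedged sketch of programming the new SHAMPO fields when building an RQ context. MLX5_SET()/MLX5_ADDR_OF() are the standard accessors; the "ctx" field name of create_rq_in and the chosen values are assumptions:

	void *rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);

	MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
		 MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE);
	MLX5_SET(rqc, rqc, shampo_match_criteria_type,
		 MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED);
	MLX5_SET(rqc, rqc, reservation_timeout, 0x80);	/* illustrative value */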
@@ -4097,13 +4215,19 @@ struct mlx5_ifc_health_buffer_bits {
u8 assert_callra[0x20];
- u8 reserved_at_140[0x40];
+ u8 reserved_at_140[0x20];
+
+ u8 time[0x20];
u8 fw_version[0x20];
u8 hw_id[0x20];
- u8 reserved_at_1c0[0x20];
+ u8 rfr[0x1];
+ u8 reserved_at_1c1[0x3];
+ u8 valid[0x1];
+ u8 severity[0x3];
+ u8 reserved_at_1c8[0x18];
u8 irisc_index[0x8];
u8 synd[0x8];
@@ -5616,6 +5740,236 @@ struct mlx5_ifc_query_fte_in_bits {
u8 reserved_at_120[0xe0];
};
+struct mlx5_ifc_match_definer_format_0_bits {
+ u8 reserved_at_0[0x100];
+
+ u8 metadata_reg_c_0[0x20];
+
+ u8 metadata_reg_c_1[0x20];
+
+ u8 outer_dmac_47_16[0x20];
+
+ u8 outer_dmac_15_0[0x10];
+ u8 outer_ethertype[0x10];
+
+ u8 reserved_at_180[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 outer_ip_frag[0x1];
+ u8 outer_qp_type[0x2];
+ u8 outer_encap_type[0x2];
+ u8 port_number[0x2];
+ u8 outer_l3_type[0x2];
+ u8 outer_l4_type[0x2];
+ u8 outer_first_vlan_type[0x2];
+ u8 outer_first_vlan_prio[0x3];
+ u8 outer_first_vlan_cfi[0x1];
+ u8 outer_first_vlan_vid[0xc];
+
+ u8 outer_l4_type_ext[0x4];
+ u8 reserved_at_1a4[0x2];
+ u8 outer_ipsec_layer[0x2];
+ u8 outer_l2_type[0x2];
+ u8 force_lb[0x1];
+ u8 outer_l2_ok[0x1];
+ u8 outer_l3_ok[0x1];
+ u8 outer_l4_ok[0x1];
+ u8 outer_second_vlan_type[0x2];
+ u8 outer_second_vlan_prio[0x3];
+ u8 outer_second_vlan_cfi[0x1];
+ u8 outer_second_vlan_vid[0xc];
+
+ u8 outer_smac_47_16[0x20];
+
+ u8 outer_smac_15_0[0x10];
+ u8 inner_ipv4_checksum_ok[0x1];
+ u8 inner_l4_checksum_ok[0x1];
+ u8 outer_ipv4_checksum_ok[0x1];
+ u8 outer_l4_checksum_ok[0x1];
+ u8 inner_l3_ok[0x1];
+ u8 inner_l4_ok[0x1];
+ u8 outer_l3_ok_duplicate[0x1];
+ u8 outer_l4_ok_duplicate[0x1];
+ u8 outer_tcp_cwr[0x1];
+ u8 outer_tcp_ece[0x1];
+ u8 outer_tcp_urg[0x1];
+ u8 outer_tcp_ack[0x1];
+ u8 outer_tcp_psh[0x1];
+ u8 outer_tcp_rst[0x1];
+ u8 outer_tcp_syn[0x1];
+ u8 outer_tcp_fin[0x1];
+};
+
+struct mlx5_ifc_match_definer_format_22_bits {
+ u8 reserved_at_0[0x100];
+
+ u8 outer_ip_src_addr[0x20];
+
+ u8 outer_ip_dest_addr[0x20];
+
+ u8 outer_l4_sport[0x10];
+ u8 outer_l4_dport[0x10];
+
+ u8 reserved_at_160[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 outer_ip_frag[0x1];
+ u8 outer_qp_type[0x2];
+ u8 outer_encap_type[0x2];
+ u8 port_number[0x2];
+ u8 outer_l3_type[0x2];
+ u8 outer_l4_type[0x2];
+ u8 outer_first_vlan_type[0x2];
+ u8 outer_first_vlan_prio[0x3];
+ u8 outer_first_vlan_cfi[0x1];
+ u8 outer_first_vlan_vid[0xc];
+
+ u8 metadata_reg_c_0[0x20];
+
+ u8 outer_dmac_47_16[0x20];
+
+ u8 outer_smac_47_16[0x20];
+
+ u8 outer_smac_15_0[0x10];
+ u8 outer_dmac_15_0[0x10];
+};
+
+struct mlx5_ifc_match_definer_format_23_bits {
+ u8 reserved_at_0[0x100];
+
+ u8 inner_ip_src_addr[0x20];
+
+ u8 inner_ip_dest_addr[0x20];
+
+ u8 inner_l4_sport[0x10];
+ u8 inner_l4_dport[0x10];
+
+ u8 reserved_at_160[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 inner_ip_frag[0x1];
+ u8 inner_qp_type[0x2];
+ u8 inner_encap_type[0x2];
+ u8 port_number[0x2];
+ u8 inner_l3_type[0x2];
+ u8 inner_l4_type[0x2];
+ u8 inner_first_vlan_type[0x2];
+ u8 inner_first_vlan_prio[0x3];
+ u8 inner_first_vlan_cfi[0x1];
+ u8 inner_first_vlan_vid[0xc];
+
+ u8 tunnel_header_0[0x20];
+
+ u8 inner_dmac_47_16[0x20];
+
+ u8 inner_smac_47_16[0x20];
+
+ u8 inner_smac_15_0[0x10];
+ u8 inner_dmac_15_0[0x10];
+};
+
+struct mlx5_ifc_match_definer_format_29_bits {
+ u8 reserved_at_0[0xc0];
+
+ u8 outer_ip_dest_addr[0x80];
+
+ u8 outer_ip_src_addr[0x80];
+
+ u8 outer_l4_sport[0x10];
+ u8 outer_l4_dport[0x10];
+
+ u8 reserved_at_1e0[0x20];
+};
+
+struct mlx5_ifc_match_definer_format_30_bits {
+ u8 reserved_at_0[0xa0];
+
+ u8 outer_ip_dest_addr[0x80];
+
+ u8 outer_ip_src_addr[0x80];
+
+ u8 outer_dmac_47_16[0x20];
+
+ u8 outer_smac_47_16[0x20];
+
+ u8 outer_smac_15_0[0x10];
+ u8 outer_dmac_15_0[0x10];
+};
+
+struct mlx5_ifc_match_definer_format_31_bits {
+ u8 reserved_at_0[0xc0];
+
+ u8 inner_ip_dest_addr[0x80];
+
+ u8 inner_ip_src_addr[0x80];
+
+ u8 inner_l4_sport[0x10];
+ u8 inner_l4_dport[0x10];
+
+ u8 reserved_at_1e0[0x20];
+};
+
+struct mlx5_ifc_match_definer_format_32_bits {
+ u8 reserved_at_0[0xa0];
+
+ u8 inner_ip_dest_addr[0x80];
+
+ u8 inner_ip_src_addr[0x80];
+
+ u8 inner_dmac_47_16[0x20];
+
+ u8 inner_smac_47_16[0x20];
+
+ u8 inner_smac_15_0[0x10];
+ u8 inner_dmac_15_0[0x10];
+};
+
+struct mlx5_ifc_match_definer_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x40];
+
+ u8 reserved_at_80[0x10];
+ u8 format_id[0x10];
+
+ u8 reserved_at_a0[0x160];
+
+ u8 match_mask[16][0x20];
+};
+
+struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 vhca_tunnel_id[0x10];
+ u8 obj_type[0x10];
+
+ u8 obj_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 obj_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_match_definer_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+
+ struct mlx5_ifc_match_definer_bits obj_context;
+};
+
+struct mlx5_ifc_create_match_definer_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
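For context, a sketch of creating a match definer through the general-object interface defined above; MLX5_OBJ_TYPE_MATCH_DEFINER is the assumed object type id and error handling is abbreviated:

static int create_match_definer(struct mlx5_core_dev *dev, u16 format_id,
				u32 *definer_id)
{
	u32 out[MLX5_ST_SZ_DW(create_match_definer_out)] = {};
	u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {};
	void *hdr = MLX5_ADDR_OF(create_match_definer_in, in,
				 general_obj_in_cmd_hdr);
	void *ctx = MLX5_ADDR_OF(create_match_definer_in, in, obj_context);
	int err;

	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
		 MLX5_OBJ_TYPE_MATCH_DEFINER);	/* assumed object type id */
	MLX5_SET(match_definer, ctx, format_id, format_id);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*definer_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return err;
}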
enum {
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
@@ -6369,7 +6723,7 @@ struct mlx5_ifc_modify_tir_bitmask_bits {
u8 reserved_at_3c[0x1];
u8 hash[0x1];
u8 reserved_at_3e[0x1];
- u8 lro[0x1];
+ u8 packet_merge[0x1];
};
struct mlx5_ifc_modify_tir_out_bits {
@@ -7569,7 +7923,7 @@ struct mlx5_ifc_dealloc_uar_out_bits {
struct mlx5_ifc_dealloc_uar_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -8090,6 +8444,11 @@ struct mlx5_ifc_create_flow_group_out_bits {
};
enum {
+ MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_TCAM_SUBTABLE = 0x0,
+ MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT = 0x1,
+};
+
+enum {
MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
@@ -8110,7 +8469,9 @@ struct mlx5_ifc_create_flow_group_in_bits {
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x4];
+ u8 group_type[0x4];
+ u8 reserved_at_90[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
@@ -8125,7 +8486,10 @@ struct mlx5_ifc_create_flow_group_in_bits {
u8 end_flow_index[0x20];
- u8 reserved_at_140[0xa0];
+ u8 reserved_at_140[0x10];
+ u8 match_definer_id[0x10];
+
+ u8 reserved_at_160[0x80];
u8 reserved_at_1e0[0x18];
u8 match_criteria_enable[0x8];
@@ -8416,7 +8780,7 @@ struct mlx5_ifc_alloc_uar_out_bits {
struct mlx5_ifc_alloc_uar_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -10060,6 +10424,17 @@ struct mlx5_ifc_pddr_reg_bits {
union mlx5_ifc_pddr_reg_page_data_auto_bits page_data;
};
+struct mlx5_ifc_mrtc_reg_bits {
+ u8 time_synced[0x1];
+ u8 reserved_at_1[0x1f];
+
+ u8 reserved_at_20[0x20];
+
+ u8 time_h[0x20];
+
+ u8 time_l[0x20];
+};
+
union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
@@ -10121,6 +10496,7 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_mirc_reg_bits mirc_reg;
struct mlx5_ifc_mfrl_reg_bits mfrl_reg;
struct mlx5_ifc_mtutc_reg_bits mtutc_reg;
+ struct mlx5_ifc_mrtc_reg_bits mrtc_reg;
u8 reserved_at_0[0x60e0];
};
@@ -10398,9 +10774,16 @@ struct mlx5_ifc_dcbx_param_bits {
u8 reserved_at_a0[0x160];
};
+enum {
+ MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY = 0,
+ MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT,
+};
+
struct mlx5_ifc_lagc_bits {
u8 fdb_selection_mode[0x1];
- u8 reserved_at_1[0x1c];
+ u8 reserved_at_1[0x14];
+ u8 port_select_mode[0x3];
+ u8 reserved_at_18[0x5];
u8 lag_state[0x3];
u8 reserved_at_20[0x14];
@@ -10614,29 +10997,6 @@ struct mlx5_ifc_dealloc_memic_out_bits {
u8 reserved_at_40[0x40];
};
-struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
- u8 opcode[0x10];
- u8 uid[0x10];
-
- u8 vhca_tunnel_id[0x10];
- u8 obj_type[0x10];
-
- u8 obj_id[0x20];
-
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
-
- u8 syndrome[0x20];
-
- u8 obj_id[0x20];
-
- u8 reserved_at_60[0x20];
-};
-
struct mlx5_ifc_umem_bits {
u8 reserved_at_0[0x80];
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 73a52aba448f..a7e4a9e7d807 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -36,10 +36,7 @@
struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
-struct file_ra_state;
struct user_struct;
-struct writeback_control;
-struct bdi_writeback;
struct pt_regs;
extern int sysctl_page_lock_unfairness;
@@ -216,13 +213,6 @@ int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *,
loff_t *);
int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
loff_t *);
-/*
- * Any attempt to mark this function as static leads to build failure
- * when CONFIG_DEBUG_INFO_BTF is enabled because __add_to_page_cache_locked()
- * is referred to by BPF code. This must be visible for error injection.
- */
-int __add_to_page_cache_locked(struct page *page, struct address_space *mapping,
- pgoff_t index, gfp_t gfp, void **shadowp);
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
@@ -748,13 +738,18 @@ static inline int put_page_testzero(struct page *page)
return page_ref_dec_and_test(page);
}
+static inline int folio_put_testzero(struct folio *folio)
+{
+ return put_page_testzero(&folio->page);
+}
+
/*
* Try to grab a ref unless the page has a refcount of zero, return false if
* that is the case.
* This can be called when MMU is off so it must not access
* any of the virtual mappings.
*/
-static inline int get_page_unless_zero(struct page *page)
+static inline bool get_page_unless_zero(struct page *page)
{
return page_ref_add_unless(page, 1, 0);
}
@@ -799,40 +794,6 @@ static inline int is_vmalloc_or_module_addr(const void *x)
}
#endif
-extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
-static inline void *kvmalloc(size_t size, gfp_t flags)
-{
- return kvmalloc_node(size, flags, NUMA_NO_NODE);
-}
-static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
-{
- return kvmalloc_node(size, flags | __GFP_ZERO, node);
-}
-static inline void *kvzalloc(size_t size, gfp_t flags)
-{
- return kvmalloc(size, flags | __GFP_ZERO);
-}
-
-static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
-{
- size_t bytes;
-
- if (unlikely(check_mul_overflow(n, size, &bytes)))
- return NULL;
-
- return kvmalloc(bytes, flags);
-}
-
-static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
-{
- return kvmalloc_array(n, size, flags | __GFP_ZERO);
-}
-
-extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize,
- gfp_t flags);
-extern void kvfree(const void *addr);
-extern void kvfree_sensitive(const void *addr, size_t len);
-
static inline int head_compound_mapcount(struct page *head)
{
return atomic_read(compound_mapcount_ptr(head)) + 1;
@@ -907,7 +868,9 @@ void __put_page(struct page *page);
void put_pages_list(struct list_head *pages);
void split_page(struct page *page, unsigned int order);
-void copy_huge_page(struct page *dst, struct page *src);
+void folio_copy(struct folio *dst, struct folio *src);
+
+unsigned long nr_free_buffer_pages(void);
/*
* Compound pages have a destructor function. Provide a
@@ -950,6 +913,20 @@ static inline unsigned int compound_order(struct page *page)
return page[1].compound_order;
}
+/**
+ * folio_order - The allocation order of a folio.
+ * @folio: The folio.
+ *
+ * A folio is composed of 2^order pages. See get_order() for the definition
+ * of order.
+ *
+ * Return: The order of the folio.
+ */
+static inline unsigned int folio_order(struct folio *folio)
+{
+ return compound_order(&folio->page);
+}
+
static inline bool hpage_pincount_available(struct page *page)
{
/*
@@ -1131,6 +1108,11 @@ static inline enum zone_type page_zonenum(const struct page *page)
return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}
+static inline enum zone_type folio_zonenum(const struct folio *folio)
+{
+ return page_zonenum(&folio->page);
+}
+
#ifdef CONFIG_ZONE_DEVICE
static inline bool is_zone_device_page(const struct page *page)
{
@@ -1200,18 +1182,26 @@ static inline bool is_pci_p2pdma_page(const struct page *page)
}
/* 127: arbitrary random number, small enough to assemble well */
-#define page_ref_zero_or_close_to_overflow(page) \
- ((unsigned int) page_ref_count(page) + 127u <= 127u)
+#define folio_ref_zero_or_close_to_overflow(folio) \
+ ((unsigned int) folio_ref_count(folio) + 127u <= 127u)
+
+/**
+ * folio_get - Increment the reference count on a folio.
+ * @folio: The folio.
+ *
+ * Context: May be called in any context, as long as you know that
+ * you have a refcount on the folio. If you do not already have one,
+ * folio_try_get() may be the right interface for you to use.
+ */
+static inline void folio_get(struct folio *folio)
+{
+ VM_BUG_ON_FOLIO(folio_ref_zero_or_close_to_overflow(folio), folio);
+ folio_ref_inc(folio);
+}
static inline void get_page(struct page *page)
{
- page = compound_head(page);
- /*
- * Getting a normal page or the head of a compound page
- * requires to already have an elevated page->_refcount.
- */
- VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
- page_ref_inc(page);
+ folio_get(page_folio(page));
}
bool __must_check try_grab_page(struct page *page, unsigned int flags);
@@ -1228,9 +1218,28 @@ static inline __must_check bool try_get_page(struct page *page)
return true;
}
+/**
+ * folio_put - Decrement the reference count on a folio.
+ * @folio: The folio.
+ *
+ * If the folio's reference count reaches zero, the memory will be
+ * released back to the page allocator and may be used by another
+ * allocation immediately. Do not access the memory or the struct folio
+ * after calling folio_put() unless you can be sure that it wasn't the
+ * last reference.
+ *
+ * Context: May be called in process or interrupt context, but not in NMI
+ * context. May be called while holding a spinlock.
+ */
+static inline void folio_put(struct folio *folio)
+{
+ if (folio_put_testzero(folio))
+ __put_page(&folio->page);
+}
+
static inline void put_page(struct page *page)
{
- page = compound_head(page);
+ struct folio *folio = page_folio(page);
/*
* For devmap managed pages we need to catch refcount transition from
@@ -1238,13 +1247,12 @@ static inline void put_page(struct page *page)
* need to inform the device driver through callback. See
* include/linux/memremap.h and HMM for details.
*/
- if (page_is_devmap_managed(page)) {
- put_devmap_managed_page(page);
+ if (page_is_devmap_managed(&folio->page)) {
+ put_devmap_managed_page(&folio->page);
return;
}
- if (put_page_testzero(page))
- __put_page(page);
+ folio_put(folio);
}
/*
@@ -1379,6 +1387,11 @@ static inline int page_to_nid(const struct page *page)
}
#endif
+static inline int folio_nid(const struct folio *folio)
+{
+ return page_to_nid(&folio->page);
+}
+
#ifdef CONFIG_NUMA_BALANCING
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
@@ -1546,6 +1559,16 @@ static inline pg_data_t *page_pgdat(const struct page *page)
return NODE_DATA(page_to_nid(page));
}
+static inline struct zone *folio_zone(const struct folio *folio)
+{
+ return page_zone(&folio->page);
+}
+
+static inline pg_data_t *folio_pgdat(const struct folio *folio)
+{
+ return page_pgdat(&folio->page);
+}
+
#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
@@ -1559,6 +1582,20 @@ static inline unsigned long page_to_section(const struct page *page)
}
#endif
+/**
+ * folio_pfn - Return the Page Frame Number of a folio.
+ * @folio: The folio.
+ *
+ * A folio may contain multiple pages. The pages have consecutive
+ * Page Frame Numbers.
+ *
+ * Return: The Page Frame Number of the first page in the folio.
+ */
+static inline unsigned long folio_pfn(struct folio *folio)
+{
+ return page_to_pfn(&folio->page);
+}
+
/* MIGRATE_CMA and ZONE_MOVABLE do not allow pin pages */
#ifdef CONFIG_MIGRATION
static inline bool is_pinnable_page(struct page *page)
@@ -1595,6 +1632,89 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
#endif
}
+/**
+ * folio_nr_pages - The number of pages in the folio.
+ * @folio: The folio.
+ *
+ * Return: A positive power of two.
+ */
+static inline long folio_nr_pages(struct folio *folio)
+{
+ return compound_nr(&folio->page);
+}
+
+/**
+ * folio_next - Move to the next physical folio.
+ * @folio: The folio we're currently operating on.
+ *
+ * If you have physically contiguous memory which may span more than
+ * one folio (e.g. a &struct bio_vec), use this function to move from one
+ * folio to the next. Do not use it if the memory is only virtually
+ * contiguous as the folios are almost certainly not adjacent to each
+ * other. This is the folio equivalent of writing ``page++``.
+ *
+ * Context: We assume that the folios are refcounted and/or locked at a
+ * higher level and do not adjust the reference counts.
+ * Return: The next struct folio.
+ */
+static inline struct folio *folio_next(struct folio *folio)
+{
+ return (struct folio *)folio_page(folio, folio_nr_pages(folio));
+}
+
+/**
+ * folio_shift - The size of the memory described by this folio.
+ * @folio: The folio.
+ *
+ * A folio represents a number of bytes which is a power-of-two in size.
+ * This function tells you which power-of-two the folio is. See also
+ * folio_size() and folio_order().
+ *
+ * Context: The caller should have a reference on the folio to prevent
+ * it from being split. It is not necessary for the folio to be locked.
+ * Return: The base-2 logarithm of the size of this folio.
+ */
+static inline unsigned int folio_shift(struct folio *folio)
+{
+ return PAGE_SHIFT + folio_order(folio);
+}
+
+/**
+ * folio_size - The number of bytes in a folio.
+ * @folio: The folio.
+ *
+ * Context: The caller should have a reference on the folio to prevent
+ * it from being split. It is not necessary for the folio to be locked.
+ * Return: The number of bytes in this folio.
+ */
+static inline size_t folio_size(struct folio *folio)
+{
+ return PAGE_SIZE << folio_order(folio);
+}
+
+#ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
+static inline int arch_make_page_accessible(struct page *page)
+{
+ return 0;
+}
+#endif
+
+#ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
+static inline int arch_make_folio_accessible(struct folio *folio)
+{
+ int ret;
+ long i, nr = folio_nr_pages(folio);
+
+ for (i = 0; i < nr; i++) {
+ ret = arch_make_page_accessible(folio_page(folio, i));
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+#endif
+
/*
* Some inline functions in vmstat.h depend on page_zone()
*/
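The geometry helpers above are all derived from the folio's order; a minimal sketch of how they relate, using offset_in_folio() added further down in this patch (numbers assume 4KiB pages):

	/* For an order-2 folio: folio_nr_pages() == 4,
	 * folio_shift() == PAGE_SHIFT + 2, folio_size() == 16384.
	 */
	static size_t bytes_left_in_folio(struct folio *folio, void *p)
	{
		return folio_size(folio) - offset_in_folio(folio, p);
	}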
@@ -1635,19 +1755,6 @@ void page_address_init(void);
extern void *page_rmapping(struct page *page);
extern struct anon_vma *page_anon_vma(struct page *page);
-extern struct address_space *page_mapping(struct page *page);
-
-extern struct address_space *__page_file_mapping(struct page *);
-
-static inline
-struct address_space *page_file_mapping(struct page *page)
-{
- if (unlikely(PageSwapCache(page)))
- return __page_file_mapping(page);
-
- return page->mapping;
-}
-
extern pgoff_t __page_file_index(struct page *page);
/*
@@ -1662,7 +1769,7 @@ static inline pgoff_t page_index(struct page *page)
}
bool page_mapped(struct page *page);
-struct address_space *page_mapping(struct page *page);
+bool folio_mapped(struct folio *folio);
/*
* Return true only if the page has been allocated with
@@ -1700,6 +1807,7 @@ extern void pagefault_out_of_memory(void);
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
#define offset_in_thp(page, p) ((unsigned long)(p) & (thp_size(page) - 1))
+#define offset_in_folio(folio, p) ((unsigned long)(p) & (folio_size(folio) - 1))
/*
* Flags passed to show_mem() and show_free_areas() to suppress output in
@@ -1721,12 +1829,24 @@ extern void user_shm_unlock(size_t, struct ucounts *);
* Parameter block passed down to zap_pte_range in exceptional cases.
*/
struct zap_details {
- struct address_space *check_mapping; /* Check page->mapping if set */
- pgoff_t first_index; /* Lowest page->index to unmap */
- pgoff_t last_index; /* Highest page->index to unmap */
+ struct address_space *zap_mapping; /* Check page->mapping if set */
struct page *single_page; /* Locked page to be unmapped */
};
+/*
+ * We set details->zap_mapping when we want to unmap shared but keep private
+ * pages. Return true if we should skip zapping this page, false otherwise.
+ */
+static inline bool
+zap_skip_check_mapping(struct zap_details *details, struct page *page)
+{
+ if (!details || !page)
+ return false;
+
+ return details->zap_mapping &&
+ (details->zap_mapping != page_rmapping(page));
+}
+
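A sketch of the intended call site inside the pte zap loop (loop elided); private COW copies no longer rmap to the mapping being zapped, so they are kept:

	if (zap_skip_check_mapping(details, page))
		continue;	/* keep the private page mapped */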
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
@@ -1854,20 +1974,9 @@ extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned int offset,
unsigned int length);
-int redirty_page_for_writepage(struct writeback_control *wbc,
- struct page *page);
-void account_page_cleaned(struct page *page, struct address_space *mapping,
- struct bdi_writeback *wb);
-int set_page_dirty(struct page *page);
+bool folio_mark_dirty(struct folio *folio);
+bool set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
-void __cancel_dirty_page(struct page *page);
-static inline void cancel_dirty_page(struct page *page)
-{
- /* Avoid atomic ops, locking, etc. when not actually needed. */
- if (PageDirty(page))
- __cancel_dirty_page(page);
-}
-int clear_page_dirty_for_io(struct page *page);
int get_cmdline(struct task_struct *task, char *buffer, int buflen);
@@ -2447,7 +2556,7 @@ static inline unsigned long get_num_physpages(void)
* unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
* max_highmem_pfn};
* for_each_valid_physical_page_range()
- * memblock_add_node(base, size, nid)
+ * memblock_add_node(base, size, nid, MEMBLOCK_NONE)
* free_area_init(max_zone_pfns);
*/
void free_area_init(unsigned long *max_zone_pfn);
@@ -2475,6 +2584,7 @@ extern void memmap_init_range(unsigned long, int, unsigned long,
unsigned long, unsigned long, enum meminit_context,
struct vmem_altmap *, int migratetype);
extern void setup_per_zone_wmarks(void);
+extern void calculate_min_free_kbytes(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
@@ -2659,10 +2769,6 @@ extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
-/* mm/page-writeback.c */
-int __must_check write_one_page(struct page *page);
-void task_dirty_inc(struct task_struct *tsk);
-
extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
@@ -2851,7 +2957,8 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
#define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */
#define FOLL_NOWAIT 0x20 /* if a disk transfer is needed, start the IO
* and return without waiting upon it */
-#define FOLL_POPULATE 0x40 /* fault in page */
+#define FOLL_POPULATE 0x40 /* fault in pages (with FOLL_MLOCK) */
+#define FOLL_NOFAULT 0x80 /* do not fault in pages */
#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */
#define FOLL_NUMA 0x200 /* force NUMA hinting page fault */
#define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 355ea1ee32bd..e2ec68b0515c 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -6,27 +6,33 @@
#include <linux/swap.h>
/**
- * page_is_file_lru - should the page be on a file LRU or anon LRU?
- * @page: the page to test
- *
- * Returns 1 if @page is a regular filesystem backed page cache page or a lazily
- * freed anonymous page (e.g. via MADV_FREE). Returns 0 if @page is a normal
- * anonymous page, a tmpfs page or otherwise ram or swap backed page. Used by
- * functions that manipulate the LRU lists, to sort a page onto the right LRU
- * list.
+ * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
+ * @folio: The folio to test.
*
* We would like to get this info without a page flag, but the state
- * needs to survive until the page is last deleted from the LRU, which
+ * needs to survive until the folio is last deleted from the LRU, which
* could be as far down as __page_cache_release.
+ *
+ * Return: An integer (not a boolean!) used to sort a folio onto the
+ * right LRU list and to account folios correctly.
+ * 1 if @folio is a regular filesystem backed page cache folio
+ * or a lazily freed anonymous folio (e.g. via MADV_FREE).
+ * 0 if @folio is a normal anonymous folio, a tmpfs folio or an otherwise
+ * ram- or swap-backed folio.
*/
+static inline int folio_is_file_lru(struct folio *folio)
+{
+ return !folio_test_swapbacked(folio);
+}
+
static inline int page_is_file_lru(struct page *page)
{
- return !PageSwapBacked(page);
+ return folio_is_file_lru(page_folio(page));
}
static __always_inline void update_lru_size(struct lruvec *lruvec,
enum lru_list lru, enum zone_type zid,
- int nr_pages)
+ long nr_pages)
{
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
@@ -39,69 +45,94 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
}
/**
- * __clear_page_lru_flags - clear page lru flags before releasing a page
- * @page: the page that was on lru and now has a zero reference
+ * __folio_clear_lru_flags - Clear page lru flags before releasing a page.
+ * @folio: The folio that was on lru and now has a zero reference.
*/
-static __always_inline void __clear_page_lru_flags(struct page *page)
+static __always_inline void __folio_clear_lru_flags(struct folio *folio)
{
- VM_BUG_ON_PAGE(!PageLRU(page), page);
+ VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);
- __ClearPageLRU(page);
+ __folio_clear_lru(folio);
/* this shouldn't happen, so leave the flags to bad_page() */
- if (PageActive(page) && PageUnevictable(page))
+ if (folio_test_active(folio) && folio_test_unevictable(folio))
return;
- __ClearPageActive(page);
- __ClearPageUnevictable(page);
+ __folio_clear_active(folio);
+ __folio_clear_unevictable(folio);
+}
+
+static __always_inline void __clear_page_lru_flags(struct page *page)
+{
+ __folio_clear_lru_flags(page_folio(page));
}
/**
- * page_lru - which LRU list should a page be on?
- * @page: the page to test
+ * folio_lru_list - Which LRU list should a folio be on?
+ * @folio: The folio to test.
*
- * Returns the LRU list a page should be on, as an index
+ * Return: The LRU list a folio should be on, as an index
* into the array of LRU lists.
*/
-static __always_inline enum lru_list page_lru(struct page *page)
+static __always_inline enum lru_list folio_lru_list(struct folio *folio)
{
enum lru_list lru;
- VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
+ VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);
- if (PageUnevictable(page))
+ if (folio_test_unevictable(folio))
return LRU_UNEVICTABLE;
- lru = page_is_file_lru(page) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
- if (PageActive(page))
+ lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
+ if (folio_test_active(folio))
lru += LRU_ACTIVE;
return lru;
}
+static __always_inline
+void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
+{
+ enum lru_list lru = folio_lru_list(folio);
+
+ update_lru_size(lruvec, lru, folio_zonenum(folio),
+ folio_nr_pages(folio));
+ list_add(&folio->lru, &lruvec->lists[lru]);
+}
+
static __always_inline void add_page_to_lru_list(struct page *page,
struct lruvec *lruvec)
{
- enum lru_list lru = page_lru(page);
+ lruvec_add_folio(lruvec, page_folio(page));
+}
- update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
- list_add(&page->lru, &lruvec->lists[lru]);
+static __always_inline
+void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
+{
+ enum lru_list lru = folio_lru_list(folio);
+
+ update_lru_size(lruvec, lru, folio_zonenum(folio),
+ folio_nr_pages(folio));
+ list_add_tail(&folio->lru, &lruvec->lists[lru]);
}
static __always_inline void add_page_to_lru_list_tail(struct page *page,
struct lruvec *lruvec)
{
- enum lru_list lru = page_lru(page);
+ lruvec_add_folio_tail(lruvec, page_folio(page));
+}
- update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
- list_add_tail(&page->lru, &lruvec->lists[lru]);
+static __always_inline
+void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
+{
+ list_del(&folio->lru);
+ update_lru_size(lruvec, folio_lru_list(folio), folio_zonenum(folio),
+ -folio_nr_pages(folio));
}
static __always_inline void del_page_from_lru_list(struct page *page,
struct lruvec *lruvec)
{
- list_del(&page->lru);
- update_lru_size(lruvec, page_lru(page), page_zonenum(page),
- -thp_nr_pages(page));
+ lruvec_del_folio(lruvec, page_folio(page));
}
#endif
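A hedged sketch of moving a folio between LRU lists with the new helpers, assuming the existing lruvec locking helpers from memcontrol.h:

	struct lruvec *lruvec;

	lruvec = folio_lruvec_lock_irq(folio);
	lruvec_del_folio(lruvec, folio);
	folio_set_active(folio);	/* promote to the active list */
	lruvec_add_folio(lruvec, folio);
	unlock_page_lruvec_irq(lruvec);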
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 7f8ee09c711f..bb8c6f5f19bc 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -12,6 +12,7 @@
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
+#include <linux/rcupdate.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>
#include <linux/seqlock.h>
@@ -104,18 +105,7 @@ struct page {
struct page_pool *pp;
unsigned long _pp_mapping_pad;
unsigned long dma_addr;
- union {
- /**
- * dma_addr_upper: might require a 64-bit
- * value on 32-bit architectures.
- */
- unsigned long dma_addr_upper;
- /**
- * For frag page support, not supported in
- * 32-bit architectures with 64-bit DMA.
- */
- atomic_long_t pp_frag_count;
- };
+ atomic_long_t pp_frag_count;
};
struct { /* slab, slob and slub */
union {
@@ -124,10 +114,8 @@ struct page {
struct page *next;
#ifdef CONFIG_64BIT
int pages; /* Nr of pages left */
- int pobjects; /* Approximate count */
#else
short int pages;
- short int pobjects;
#endif
};
};
@@ -239,6 +227,72 @@ struct page {
#endif
} _struct_page_alignment;
+/**
+ * struct folio - Represents a contiguous set of bytes.
+ * @flags: Identical to the page flags.
+ * @lru: Least Recently Used list; tracks how recently this folio was used.
+ * @mapping: The file this page belongs to, or refers to the anon_vma for
+ * anonymous memory.
+ * @index: Offset within the file, in units of pages. For anonymous memory,
+ * this is the index from the beginning of the mmap.
+ * @private: Filesystem per-folio data (see folio_attach_private()).
+ * Used for swp_entry_t if folio_test_swapcache().
+ * @_mapcount: Do not access this member directly. Use folio_mapcount() to
+ * find out how many times this folio is mapped by userspace.
+ * @_refcount: Do not access this member directly. Use folio_ref_count()
+ * to find how many references there are to this folio.
+ * @memcg_data: Memory Control Group data.
+ *
+ * A folio is a physically, virtually and logically contiguous set
+ * of bytes. It is a power-of-two in size, and it is aligned to that
+ * same power-of-two. It is at least as large as %PAGE_SIZE. If it is
+ * in the page cache, it is at a file offset which is a multiple of that
+ * power-of-two. It may be mapped into userspace at an address which is
+ * at an arbitrary page offset, but its kernel virtual address is aligned
+ * to its size.
+ */
+struct folio {
+ /* private: don't document the anon union */
+ union {
+ struct {
+ /* public: */
+ unsigned long flags;
+ struct list_head lru;
+ struct address_space *mapping;
+ pgoff_t index;
+ void *private;
+ atomic_t _mapcount;
+ atomic_t _refcount;
+#ifdef CONFIG_MEMCG
+ unsigned long memcg_data;
+#endif
+ /* private: the union with struct page is transitional */
+ };
+ struct page page;
+ };
+};
+
+static_assert(sizeof(struct page) == sizeof(struct folio));
+#define FOLIO_MATCH(pg, fl) \
+ static_assert(offsetof(struct page, pg) == offsetof(struct folio, fl))
+FOLIO_MATCH(flags, flags);
+FOLIO_MATCH(lru, lru);
+FOLIO_MATCH(compound_head, lru);
+FOLIO_MATCH(index, index);
+FOLIO_MATCH(private, private);
+FOLIO_MATCH(_mapcount, _mapcount);
+FOLIO_MATCH(_refcount, _refcount);
+#ifdef CONFIG_MEMCG
+FOLIO_MATCH(memcg_data, memcg_data);
+#endif
+#undef FOLIO_MATCH
+
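Because the leading members alias struct page exactly, as the FOLIO_MATCH asserts verify, conversion in either direction is free; a small sketch:

	struct folio *folio = page_folio(page);	/* safe on head and tail pages */
	struct page *head = &folio->page;	/* always the head page */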
+static inline atomic_t *folio_mapcount_ptr(struct folio *folio)
+{
+ struct page *tail = &folio->page + 1;
+ return &tail->compound_mapcount;
+}
+
static inline atomic_t *compound_mapcount_ptr(struct page *page)
{
return &page[1].compound_mapcount;
@@ -257,6 +311,12 @@ static inline atomic_t *compound_pincount_ptr(struct page *page)
#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
+/*
+ * page_private can be used on tail pages. However, PagePrivate is only
+ * checked by the VM on the head page. So page_private on the tail pages
+ * should be used for data that's ancillary to the head page (e.g. attaching
+ * buffer heads to tail pages after attaching buffer heads to the head page).
+ */
#define page_private(page) ((page)->private)
static inline void set_page_private(struct page *page, unsigned long private)
@@ -264,6 +324,11 @@ static inline void set_page_private(struct page *page, unsigned long private)
page->private = private;
}
+static inline void *folio_get_private(struct folio *folio)
+{
+ return folio->private;
+}
+
struct page_frag_cache {
void * va;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
@@ -387,17 +452,6 @@ struct vm_area_struct {
struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;
-struct core_thread {
- struct task_struct *task;
- struct core_thread *next;
-};
-
-struct core_state {
- atomic_t nr_threads;
- struct core_thread dumper;
- struct completion startup;
-};
-
struct kioctx_table;
struct mm_struct {
struct {
@@ -518,8 +572,6 @@ struct mm_struct {
unsigned long flags; /* Must use atomic bitops to access */
- struct core_state *core_state; /* coredumping support */
-
#ifdef CONFIG_AIO
spinlock_t ioctx_lock;
struct kioctx_table __rcu *ioctx_table;
@@ -572,6 +624,9 @@ struct mm_struct {
bool tlb_flush_batched;
#endif
struct uprobes_state uprobes_state;
+#ifdef CONFIG_PREEMPT_RT
+ struct rcu_head delayed_drop;
+#endif
#ifdef CONFIG_HUGETLB_PAGE
atomic_long_t hugetlb_usage;
#endif
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 0c0c9a0fdf57..7afb57cab00b 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -15,7 +15,7 @@
#include <linux/mmc/card.h>
#include <linux/mmc/pm.h>
#include <linux/dma-direction.h>
-#include <linux/keyslot-manager.h>
+#include <linux/blk-crypto-profile.h>
struct mmc_ios {
unsigned int clock; /* clock rate */
@@ -162,6 +162,9 @@ struct mmc_host_ops {
/* Prepare HS400 target operating frequency depending host driver */
int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios);
+ /* Execute HS400 tuning depending host driver */
+ int (*execute_hs400_tuning)(struct mmc_host *host, struct mmc_card *card);
+
/* Prepare switch to DDR during the HS400 init sequence */
int (*hs400_prepare_ddr)(struct mmc_host *host);
@@ -492,7 +495,7 @@ struct mmc_host {
/* Inline encryption support */
#ifdef CONFIG_MMC_CRYPTO
- struct blk_keyslot_manager ksm;
+ struct blk_crypto_profile crypto_profile;
#endif
/* Host Software Queue support */
@@ -634,5 +637,6 @@ static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data)
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode);
+int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
#endif /* LINUX_MMC_HOST_H */
diff --git a/include/linux/mmc/sdhci-pci-data.h b/include/linux/mmc/sdhci-pci-data.h
deleted file mode 100644
index 1d42872d22f3..000000000000
--- a/include/linux/mmc/sdhci-pci-data.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef LINUX_MMC_SDHCI_PCI_DATA_H
-#define LINUX_MMC_SDHCI_PCI_DATA_H
-
-struct pci_dev;
-
-struct sdhci_pci_data {
- struct pci_dev *pdev;
- int slotno;
- int rst_n_gpio; /* Set to -EINVAL if unused */
- int cd_gpio; /* Set to -EINVAL if unused */
- int (*setup)(struct sdhci_pci_data *data);
- void (*cleanup)(struct sdhci_pci_data *data);
-};
-
-extern struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev,
- int slotno);
-#endif
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 1935d4c72d10..d7285f8148a3 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -22,6 +22,13 @@ void dump_mm(const struct mm_struct *mm);
BUG(); \
} \
} while (0)
+#define VM_BUG_ON_FOLIO(cond, folio) \
+ do { \
+ if (unlikely(cond)) { \
+ dump_page(&folio->page, "VM_BUG_ON_FOLIO(" __stringify(cond)")");\
+ BUG(); \
+ } \
+ } while (0)
#define VM_BUG_ON_VMA(cond, vma) \
do { \
if (unlikely(cond)) { \
@@ -47,6 +54,17 @@ void dump_mm(const struct mm_struct *mm);
} \
unlikely(__ret_warn_once); \
})
+#define VM_WARN_ON_ONCE_FOLIO(cond, folio) ({ \
+ static bool __section(".data.once") __warned; \
+ int __ret_warn_once = !!(cond); \
+ \
+ if (unlikely(__ret_warn_once && !__warned)) { \
+ dump_page(&folio->page, "VM_WARN_ON_ONCE_FOLIO(" __stringify(cond)")");\
+ __warned = true; \
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn_once); \
+})
#define VM_WARN_ON(cond) (void)WARN_ON(cond)
#define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
@@ -55,11 +73,13 @@ void dump_mm(const struct mm_struct *mm);
#else
#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
+#define VM_BUG_ON_FOLIO(cond, folio) VM_BUG_ON(cond)
#define VM_BUG_ON_VMA(cond, vma) VM_BUG_ON(cond)
#define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond)
#define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_ONCE_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
#endif
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 6a1d79d84675..58e744b78c2c 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -199,6 +199,7 @@ enum node_stat_item {
NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */
NR_DIRTIED, /* page dirtyings since bootup */
NR_WRITTEN, /* page writings since bootup */
+ NR_THROTTLED_WRITTEN, /* NR_WRITTEN while reclaim throttled */
NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */
NR_FOLL_PIN_ACQUIRED, /* via: pin_user_page(), gup flag: FOLL_PIN */
NR_FOLL_PIN_RELEASED, /* pages returned via unpin_user_page() */
@@ -272,6 +273,13 @@ enum lru_list {
NR_LRU_LISTS
};
+enum vmscan_throttle_state {
+ VMSCAN_THROTTLE_WRITEBACK,
+ VMSCAN_THROTTLE_ISOLATED,
+ VMSCAN_THROTTLE_NOPROGRESS,
+ NR_VMSCAN_THROTTLE,
+};
+
#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)
#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
@@ -841,6 +849,13 @@ typedef struct pglist_data {
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
+
+ /* workqueues for throttling reclaim for different reasons. */
+ wait_queue_head_t reclaim_wait[NR_VMSCAN_THROTTLE];
+
+ atomic_t nr_writeback_throttled;/* nr of writeback-throttled tasks */
+	unsigned long		nr_reclaim_start;	/* nr pages written while throttled,
+							 * sampled when throttling started. */
struct task_struct *kswapd; /* Protected by
mem_hotplug_begin/end() */
int kswapd_order;
@@ -1220,6 +1235,28 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
+/* Whether the 'nodes' are all movable nodes */
+static inline bool movable_only_nodes(nodemask_t *nodes)
+{
+ struct zonelist *zonelist;
+ struct zoneref *z;
+ int nid;
+
+ if (nodes_empty(*nodes))
+ return false;
+
+ /*
+	 * We can choose an arbitrary node from the nodemask to get a
+ * zonelist as they are interlinked. We just need to find
+ * at least one zone that can satisfy kernel allocations.
+ */
+ nid = first_node(*nodes);
+ zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
+ z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);
+	return !z->zone;
+}
+
+
#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif
@@ -1481,7 +1518,7 @@ static inline int pfn_valid(unsigned long pfn)
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
- ms = __nr_to_section(pfn_to_section_nr(pfn));
+ ms = __pfn_to_section(pfn);
if (!valid_section(ms))
return 0;
/*
@@ -1496,7 +1533,7 @@ static inline int pfn_in_present_section(unsigned long pfn)
{
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
- return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
+ return present_section(__pfn_to_section(pfn));
}
static inline unsigned long next_present_section_nr(unsigned long section_nr)
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 49cf6eb222e7..e616f94c7c58 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -148,7 +148,7 @@ struct msi_desc {
u8 is_msix : 1;
u8 multiple : 3;
u8 multi_cap : 3;
- u8 maskbit : 1;
+ u8 can_mask : 1;
u8 is_64 : 1;
u8 is_virtual : 1;
u16 entry_nr;
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 88227044fc86..f5e7dfc2e4e9 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -72,8 +72,6 @@ struct mtd_oob_ops {
uint8_t *oobbuf;
};
-#define MTD_MAX_OOBFREE_ENTRIES_LARGE 32
-#define MTD_MAX_ECCPOS_ENTRIES_LARGE 640
/**
* struct mtd_oob_region - oob region definition
* @offset: region offset
diff --git a/include/linux/mux/consumer.h b/include/linux/mux/consumer.h
index 5fc6bb2fefad..7a09b040ac39 100644
--- a/include/linux/mux/consumer.h
+++ b/include/linux/mux/consumer.h
@@ -16,10 +16,25 @@ struct device;
struct mux_control;
unsigned int mux_control_states(struct mux_control *mux);
-int __must_check mux_control_select(struct mux_control *mux,
- unsigned int state);
-int __must_check mux_control_try_select(struct mux_control *mux,
- unsigned int state);
+int __must_check mux_control_select_delay(struct mux_control *mux,
+ unsigned int state,
+ unsigned int delay_us);
+int __must_check mux_control_try_select_delay(struct mux_control *mux,
+ unsigned int state,
+ unsigned int delay_us);
+
+static inline int __must_check mux_control_select(struct mux_control *mux,
+ unsigned int state)
+{
+ return mux_control_select_delay(mux, state, 0);
+}
+
+static inline int __must_check mux_control_try_select(struct mux_control *mux,
+ unsigned int state)
+{
+ return mux_control_try_select_delay(mux, state, 0);
+}
+
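A usage sketch of the new delay variant; the state number and settle time are illustrative, and the old mux_control_select() is now simply the delay_us == 0 case:

	int ret;

	ret = mux_control_select_delay(mux, 1, 5000);	/* 5 ms to settle */
	if (ret < 0)
		return ret;
	/* ... use the muxed signal ... */
	mux_control_deselect(mux);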
int mux_control_deselect(struct mux_control *mux);
struct mux_control *mux_control_get(struct device *dev, const char *mux_name);
diff --git a/include/linux/mux/driver.h b/include/linux/mux/driver.h
index 627a2c6bc02d..18824064f8c0 100644
--- a/include/linux/mux/driver.h
+++ b/include/linux/mux/driver.h
@@ -12,6 +12,7 @@
#include <dt-bindings/mux/mux.h>
#include <linux/device.h>
+#include <linux/ktime.h>
#include <linux/semaphore.h>
struct mux_chip;
@@ -33,6 +34,7 @@ struct mux_control_ops {
* @states: The number of mux controller states.
* @idle_state: The mux controller state to use when inactive, or one
* of MUX_IDLE_AS_IS and MUX_IDLE_DISCONNECT.
+ * @last_change: Timestamp of last change
*
* Mux drivers may only change @states and @idle_state, and may only do so
* between allocation and registration of the mux controller. Specifically,
@@ -47,6 +49,8 @@ struct mux_control {
unsigned int states;
int idle_state;
+
+ ktime_t last_change;
};
/**
diff --git a/include/linux/nd.h b/include/linux/nd.h
index ee9ad76afbba..8a8c63edb1b2 100644
--- a/include/linux/nd.h
+++ b/include/linux/nd.h
@@ -88,7 +88,7 @@ struct nd_namespace_pmem {
struct nd_namespace_io nsio;
unsigned long lbasize;
char *alt_name;
- u8 *uuid;
+ uuid_t *uuid;
int id;
};
@@ -105,7 +105,7 @@ struct nd_namespace_pmem {
struct nd_namespace_blk {
struct nd_namespace_common common;
char *alt_name;
- u8 *uuid;
+ uuid_t *uuid;
int id;
unsigned long lbasize;
resource_size_t size;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d79163208dfd..3ec42495a43a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1861,6 +1861,7 @@ enum netdev_ml_priv_type {
* @xps_maps: XXX: need comments on this one
* @miniq_egress: clsact qdisc specific data for
* egress processing
+ * @nf_hooks_egress: netfilter hooks executed for egress packets
* @qdisc_hash: qdisc hash table
* @watchdog_timeo: Represents the timeout that is used by
* the watchdog (see dev_watchdog())
@@ -1916,7 +1917,6 @@ enum netdev_ml_priv_type {
* @sfp_bus: attached &struct sfp_bus structure.
*
* @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
- * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
*
* @proto_down: protocol port state information can be sent to the
* switch driver and used to set the phys state of the
@@ -2161,6 +2161,9 @@ struct net_device {
#ifdef CONFIG_NET_CLS_ACT
struct mini_Qdisc __rcu *miniq_egress;
#endif
+#ifdef CONFIG_NETFILTER_EGRESS
+ struct nf_hook_entries __rcu *nf_hooks_egress;
+#endif
#ifdef CONFIG_NET_SCHED
DECLARE_HASHTABLE (qdisc_hash, 4);
@@ -2250,7 +2253,6 @@ struct net_device {
struct phy_device *phydev;
struct sfp_bus *sfp_bus;
struct lock_class_key *qdisc_tx_busylock;
- struct lock_class_key *qdisc_running_key;
bool proto_down;
unsigned wol_enabled:1;
unsigned threaded:1;
@@ -2360,13 +2362,11 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
#define netdev_lockdep_set_classes(dev) \
{ \
static struct lock_class_key qdisc_tx_busylock_key; \
- static struct lock_class_key qdisc_running_key; \
static struct lock_class_key qdisc_xmit_lock_key; \
static struct lock_class_key dev_addr_list_lock_key; \
unsigned int i; \
\
(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
- (dev)->qdisc_running_key = &qdisc_running_key; \
lockdep_set_class(&(dev)->addr_list_lock, \
&dev_addr_list_lock_key); \
for (i = 0; i < (dev)->num_tx_queues; i++) \
@@ -2955,6 +2955,7 @@ struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
struct net_device *dev_get_by_name(struct net *net, const char *name);
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
struct net_device *__dev_get_by_name(struct net *net, const char *name);
+bool netdev_name_in_use(struct net *net, const char *name);
int dev_alloc_name(struct net_device *dev, const char *name);
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
void dev_close(struct net_device *dev);
@@ -4642,7 +4643,7 @@ void __hw_addr_init(struct netdev_hw_addr_list *list);
/* Functions used for device addresses handling */
static inline void
-__dev_addr_set(struct net_device *dev, const u8 *addr, size_t len)
+__dev_addr_set(struct net_device *dev, const void *addr, size_t len)
{
memcpy(dev->dev_addr, addr, len);
}
@@ -4654,7 +4655,7 @@ static inline void dev_addr_set(struct net_device *dev, const u8 *addr)
static inline void
dev_addr_mod(struct net_device *dev, unsigned int offset,
- const u8 *addr, size_t len)
+ const void *addr, size_t len)
{
memcpy(&dev->dev_addr[offset], addr, len);
}
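With the setters widened to const void *, callers can pass any buffer without casts; a short sketch:

	static const u8 addr[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	dev_addr_set(dev, addr);		/* whole address */
	dev_addr_mod(dev, 3, &addr[3], 3);	/* rewrite the last 3 bytes */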
@@ -4800,8 +4801,6 @@ struct netdev_nested_priv {
bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
struct list_head **iter);
-struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
- struct list_head **iter);
#ifdef CONFIG_LOCKDEP
static LIST_HEAD(net_unlink_list);
@@ -5236,7 +5235,7 @@ static inline void netif_keep_dst(struct net_device *dev)
static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
{
/* TODO: reserve and use an additional IFF bit, if we get more users */
- return dev->priv_flags & IFF_MACSEC;
+ return netif_is_macsec(dev);
}
extern struct pernet_operations __net_initdata loopback_net_ops;
diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h
index 4f9a4b3c5892..a40aaf645fa4 100644
--- a/include/linux/netfilter_arp/arp_tables.h
+++ b/include/linux/netfilter_arp/arp_tables.h
@@ -54,9 +54,8 @@ int arpt_register_table(struct net *net, const struct xt_table *table,
const struct nf_hook_ops *ops);
void arpt_unregister_table(struct net *net, const char *name);
void arpt_unregister_table_pre_exit(struct net *net, const char *name);
-extern unsigned int arpt_do_table(struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct xt_table *table);
+extern unsigned int arpt_do_table(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
#include <net/compat.h>
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index 10a01978bc0d..a13296d6c7ce 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -112,9 +112,8 @@ extern int ebt_register_table(struct net *net,
const struct nf_hook_ops *ops);
extern void ebt_unregister_table(struct net *net, const char *tablename);
void ebt_unregister_table_pre_exit(struct net *net, const char *tablename);
-extern unsigned int ebt_do_table(struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct ebt_table *table);
+extern unsigned int ebt_do_table(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
/* True if the hook mask denotes that the rule is in a base chain,
* used in the check() functions */
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h
deleted file mode 100644
index a13774be2eb5..000000000000
--- a/include/linux/netfilter_ingress.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NETFILTER_INGRESS_H_
-#define _NETFILTER_INGRESS_H_
-
-#include <linux/netfilter.h>
-#include <linux/netdevice.h>
-
-#ifdef CONFIG_NETFILTER_INGRESS
-static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
-{
-#ifdef CONFIG_JUMP_LABEL
- if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
- return false;
-#endif
- return rcu_access_pointer(skb->dev->nf_hooks_ingress);
-}
-
-/* caller must hold rcu_read_lock */
-static inline int nf_hook_ingress(struct sk_buff *skb)
-{
- struct nf_hook_entries *e = rcu_dereference(skb->dev->nf_hooks_ingress);
- struct nf_hook_state state;
- int ret;
-
- /* Must recheck the ingress hook head, in the event it became NULL
- * after the check in nf_hook_ingress_active evaluated to true.
- */
- if (unlikely(!e))
- return 0;
-
- nf_hook_state_init(&state, NF_NETDEV_INGRESS,
- NFPROTO_NETDEV, skb->dev, NULL, NULL,
- dev_net(skb->dev), NULL);
- ret = nf_hook_slow(skb, &state, e, 0);
- if (ret == 0)
- return -1;
-
- return ret;
-}
-
-static inline void nf_hook_ingress_init(struct net_device *dev)
-{
- RCU_INIT_POINTER(dev->nf_hooks_ingress, NULL);
-}
-#else /* CONFIG_NETFILTER_INGRESS */
-static inline int nf_hook_ingress_active(struct sk_buff *skb)
-{
- return 0;
-}
-
-static inline int nf_hook_ingress(struct sk_buff *skb)
-{
- return 0;
-}
-
-static inline void nf_hook_ingress_init(struct net_device *dev) {}
-#endif /* CONFIG_NETFILTER_INGRESS */
-#endif /* _NETFILTER_INGRESS_H_ */
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h
index 8d09bfe850dc..132b0e4a6d4d 100644
--- a/include/linux/netfilter_ipv4/ip_tables.h
+++ b/include/linux/netfilter_ipv4/ip_tables.h
@@ -63,9 +63,9 @@ struct ipt_error {
}
extern void *ipt_alloc_initial_table(const struct xt_table *);
-extern unsigned int ipt_do_table(struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct xt_table *table);
+extern unsigned int ipt_do_table(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state);
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
#include <net/compat.h>
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index 79e73fd7d965..8b8885a73c76 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -29,9 +29,8 @@ int ip6t_register_table(struct net *net, const struct xt_table *table,
const struct nf_hook_ops *ops);
void ip6t_unregister_table_pre_exit(struct net *net, const char *name);
void ip6t_unregister_table_exit(struct net *net, const char *name);
-extern unsigned int ip6t_do_table(struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct xt_table *table);
+extern unsigned int ip6t_do_table(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
#include <net/compat.h>
diff --git a/include/linux/netfilter_netdev.h b/include/linux/netfilter_netdev.h
new file mode 100644
index 000000000000..b71b57a83bb4
--- /dev/null
+++ b/include/linux/netfilter_netdev.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NETFILTER_NETDEV_H_
+#define _NETFILTER_NETDEV_H_
+
+#include <linux/netfilter.h>
+#include <linux/netdevice.h>
+
+#ifdef CONFIG_NETFILTER_INGRESS
+static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
+{
+#ifdef CONFIG_JUMP_LABEL
+ if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
+ return false;
+#endif
+ return rcu_access_pointer(skb->dev->nf_hooks_ingress);
+}
+
+/* caller must hold rcu_read_lock */
+static inline int nf_hook_ingress(struct sk_buff *skb)
+{
+ struct nf_hook_entries *e = rcu_dereference(skb->dev->nf_hooks_ingress);
+ struct nf_hook_state state;
+ int ret;
+
+ /* Must recheck the ingress hook head, in the event it became NULL
+ * after the check in nf_hook_ingress_active evaluated to true.
+ */
+ if (unlikely(!e))
+ return 0;
+
+ nf_hook_state_init(&state, NF_NETDEV_INGRESS,
+ NFPROTO_NETDEV, skb->dev, NULL, NULL,
+ dev_net(skb->dev), NULL);
+ ret = nf_hook_slow(skb, &state, e, 0);
+ if (ret == 0)
+ return -1;
+
+ return ret;
+}
+
+#else /* CONFIG_NETFILTER_INGRESS */
+static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
+{
+	return false;
+}
+
+static inline int nf_hook_ingress(struct sk_buff *skb)
+{
+ return 0;
+}
+#endif /* CONFIG_NETFILTER_INGRESS */
+
+#ifdef CONFIG_NETFILTER_EGRESS
+static inline bool nf_hook_egress_active(void)
+{
+#ifdef CONFIG_JUMP_LABEL
+ if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_EGRESS]))
+ return false;
+#endif
+ return true;
+}
+
+/**
+ * nf_hook_egress - classify packets before transmission
+ * @skb: packet to be classified
+ * @rc: result code which shall be returned by __dev_queue_xmit() on failure
+ * @dev: netdev whose egress hooks shall be applied to @skb
+ *
+ * Returns @skb on success or %NULL if the packet was consumed or filtered.
+ * Caller must hold rcu_read_lock.
+ *
+ * On ingress, packets are classified first by tc, then by netfilter.
+ * On egress, the order is reversed for symmetry. Conceptually, tc and
+ * netfilter can be thought of as layers, with netfilter layered above tc:
+ * When tc redirects a packet to another interface, netfilter is not applied
+ * because the packet is on the tc layer.
+ *
+ * The nf_skip_egress flag controls whether netfilter is applied on egress.
+ * It is updated by __netif_receive_skb_core() and __dev_queue_xmit() when the
+ * packet passes through tc and netfilter. Because __dev_queue_xmit() may be
+ * called recursively by tunnel drivers such as vxlan, the flag is reverted to
+ * false after sch_handle_egress(). This ensures that netfilter is applied
+ * on both the overlay and the underlying network.
+ */
+static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc,
+ struct net_device *dev)
+{
+ struct nf_hook_entries *e;
+ struct nf_hook_state state;
+ int ret;
+
+#ifdef CONFIG_NETFILTER_SKIP_EGRESS
+ if (skb->nf_skip_egress)
+ return skb;
+#endif
+
+ e = rcu_dereference(dev->nf_hooks_egress);
+ if (!e)
+ return skb;
+
+ nf_hook_state_init(&state, NF_NETDEV_EGRESS,
+ NFPROTO_NETDEV, dev, NULL, NULL,
+ dev_net(dev), NULL);
+ ret = nf_hook_slow(skb, &state, e, 0);
+
+ if (ret == 1) {
+ return skb;
+ } else if (ret < 0) {
+ *rc = NET_XMIT_DROP;
+ return NULL;
+ } else { /* ret == 0 */
+ *rc = NET_XMIT_SUCCESS;
+ return NULL;
+ }
+}
+#else /* CONFIG_NETFILTER_EGRESS */
+static inline bool nf_hook_egress_active(void)
+{
+ return false;
+}
+
+static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc,
+ struct net_device *dev)
+{
+ return skb;
+}
+#endif /* CONFIG_NETFILTER_EGRESS */
+
+static inline void nf_skip_egress(struct sk_buff *skb, bool skip)
+{
+#ifdef CONFIG_NETFILTER_SKIP_EGRESS
+ skb->nf_skip_egress = skip;
+#endif
+}
+
+static inline void nf_hook_netdev_init(struct net_device *dev)
+{
+#ifdef CONFIG_NETFILTER_INGRESS
+ RCU_INIT_POINTER(dev->nf_hooks_ingress, NULL);
+#endif
+#ifdef CONFIG_NETFILTER_EGRESS
+ RCU_INIT_POINTER(dev->nf_hooks_egress, NULL);
+#endif
+}
+
+#endif /* _NETFILTER_NETDEV_H_ */
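
To see how the two halves of the new header are meant to be used together, here is a hedged sketch of a transmit path consulting the egress hook, mirroring what the kernel-doc above describes for __dev_queue_xmit(); sample_xmit() is hypothetical:

static int sample_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int rc = NET_XMIT_SUCCESS;

	rcu_read_lock();	/* nf_hook_egress() requires the RCU read lock */
	if (nf_hook_egress_active())
		skb = nf_hook_egress(skb, &rc, dev);
	rcu_read_unlock();

	if (!skb)	/* consumed (NET_XMIT_SUCCESS) or dropped (NET_XMIT_DROP) */
		return rc;

	/* ... hand the skb to the qdisc/driver as before ... */
	return rc;
}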
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index 5d6a4158a9a6..ca0683b9e3d1 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -22,6 +22,7 @@
* Overload PG_private_2 to give us PG_fscache - this is used to indicate that
* a page is currently backed by a local disk cache
*/
+#define folio_test_fscache(folio) folio_test_private_2(folio)
#define PageFsCache(page) PagePrivate2((page))
#define SetPageFsCache(page) SetPagePrivate2((page))
#define ClearPageFsCache(page) ClearPagePrivate2((page))
@@ -29,60 +30,80 @@
#define TestClearPageFsCache(page) TestClearPagePrivate2((page))
/**
- * set_page_fscache - Set PG_fscache on a page and take a ref
- * @page: The page.
+ * folio_start_fscache - Start an fscache write on a folio.
+ * @folio: The folio.
*
- * Set the PG_fscache (PG_private_2) flag on a page and take the reference
- * needed for the VM to handle its lifetime correctly. This sets the flag and
- * takes the reference unconditionally, so care must be taken not to set the
- * flag again if it's already set.
+ * Call this function before writing a folio to a local cache. Starting a
+ * second write before the first one finishes is not allowed.
*/
-static inline void set_page_fscache(struct page *page)
+static inline void folio_start_fscache(struct folio *folio)
{
- set_page_private_2(page);
+ VM_BUG_ON_FOLIO(folio_test_private_2(folio), folio);
+ folio_get(folio);
+ folio_set_private_2(folio);
}
/**
- * end_page_fscache - Clear PG_fscache and release any waiters
- * @page: The page
- *
- * Clear the PG_fscache (PG_private_2) bit on a page and wake up any sleepers
- * waiting for this. The page ref held for PG_private_2 being set is released.
+ * folio_end_fscache - End an fscache write on a folio.
+ * @folio: The folio.
*
- * This is, for example, used when a netfs page is being written to a local
- * disk cache, thereby allowing writes to the cache for the same page to be
- * serialised.
+ * Call this function after the folio has been written to the local cache.
+ * This will wake any sleepers waiting on this folio.
*/
-static inline void end_page_fscache(struct page *page)
+static inline void folio_end_fscache(struct folio *folio)
{
- end_page_private_2(page);
+ folio_end_private_2(folio);
}
/**
- * wait_on_page_fscache - Wait for PG_fscache to be cleared on a page
- * @page: The page to wait on
+ * folio_wait_fscache - Wait for an fscache write on this folio to end.
+ * @folio: The folio.
*
- * Wait for PG_fscache (aka PG_private_2) to be cleared on a page.
+ * If this folio is currently being written to a local cache, wait for
+ * the write to finish. Another write may start after this one finishes,
+ * unless the caller holds the folio lock.
*/
-static inline void wait_on_page_fscache(struct page *page)
+static inline void folio_wait_fscache(struct folio *folio)
{
- wait_on_page_private_2(page);
+ folio_wait_private_2(folio);
}
/**
- * wait_on_page_fscache_killable - Wait for PG_fscache to be cleared on a page
- * @page: The page to wait on
+ * folio_wait_fscache_killable - Wait for an fscache write on this folio to end.
+ * @folio: The folio.
*
- * Wait for PG_fscache (aka PG_private_2) to be cleared on a page or until a
- * fatal signal is received by the calling task.
+ * If this folio is currently being written to a local cache, wait
+ * for the write to finish or for a fatal signal to be received.
+ * Another write may start after this one finishes, unless the caller
+ * holds the folio lock.
*
* Return:
* - 0 if successful.
* - -EINTR if a fatal signal was encountered.
*/
+static inline int folio_wait_fscache_killable(struct folio *folio)
+{
+ return folio_wait_private_2_killable(folio);
+}
+
+static inline void set_page_fscache(struct page *page)
+{
+ folio_start_fscache(page_folio(page));
+}
+
+static inline void end_page_fscache(struct page *page)
+{
+ folio_end_private_2(page_folio(page));
+}
+
+static inline void wait_on_page_fscache(struct page *page)
+{
+ folio_wait_private_2(page_folio(page));
+}
+
static inline int wait_on_page_fscache_killable(struct page *page)
{
- return wait_on_page_private_2_killable(page);
+ return folio_wait_private_2_killable(page_folio(page));
}
enum netfs_read_source {
@@ -145,13 +166,13 @@ struct netfs_read_request {
short error; /* 0 or error that occurred */
loff_t i_size; /* Size of the file */
loff_t start; /* Start position */
- pgoff_t no_unlock_page; /* Don't unlock this page after read */
+ pgoff_t no_unlock_folio; /* Don't unlock this folio after read */
refcount_t usage;
unsigned long flags;
#define NETFS_RREQ_INCOMPLETE_IO 0 /* Some ioreqs terminated short or with error */
#define NETFS_RREQ_WRITE_TO_CACHE 1 /* Need to write to the cache */
-#define NETFS_RREQ_NO_UNLOCK_PAGE 2 /* Don't unlock no_unlock_page on completion */
-#define NETFS_RREQ_DONT_UNLOCK_PAGES 3 /* Don't unlock the pages on completion */
+#define NETFS_RREQ_NO_UNLOCK_FOLIO 2 /* Don't unlock no_unlock_folio on completion */
+#define NETFS_RREQ_DONT_UNLOCK_FOLIOS 3 /* Don't unlock the folios on completion */
#define NETFS_RREQ_FAILED 4 /* The request failed */
#define NETFS_RREQ_IN_PROGRESS 5 /* Unlocked when the request completes */
const struct netfs_read_request_ops *netfs_ops;
@@ -169,7 +190,7 @@ struct netfs_read_request_ops {
void (*issue_op)(struct netfs_read_subrequest *subreq);
bool (*is_still_valid)(struct netfs_read_request *rreq);
int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
- struct page *page, void **_fsdata);
+ struct folio *folio, void **_fsdata);
void (*done)(struct netfs_read_request *rreq);
void (*cleanup)(struct address_space *mapping, void *netfs_priv);
};
@@ -219,11 +240,11 @@ extern void netfs_readahead(struct readahead_control *,
const struct netfs_read_request_ops *,
void *);
extern int netfs_readpage(struct file *,
- struct page *,
+ struct folio *,
const struct netfs_read_request_ops *,
void *);
extern int netfs_write_begin(struct file *, struct address_space *,
- loff_t, unsigned int, unsigned int, struct page **,
+ loff_t, unsigned int, unsigned int, struct folio **,
void **,
const struct netfs_read_request_ops *,
void *);
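
The netfs.h conversion above pairs folio_start_fscache() and folio_end_fscache() around a cache write. A minimal sketch of that lifecycle, assuming a hypothetical asynchronous cache_write() whose completion handler runs later:

static void sample_write_to_cache(struct folio *folio)
{
	/* Takes a folio ref and sets PG_private_2; a second writer
	 * must wait via folio_wait_fscache() first.
	 */
	folio_start_fscache(folio);
	cache_write(folio);		/* hypothetical async cache I/O */
}

static void sample_write_done(struct folio *folio)
{
	/* Clears the flag, drops the ref and wakes any waiters. */
	folio_end_fscache(folio);
}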
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 61b1c7fcc401..1ec631838af9 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -156,10 +156,6 @@ bool netlink_strict_get_check(struct sk_buff *skb);
int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
__u32 group, gfp_t allocation);
-int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb,
- __u32 portid, __u32 group, gfp_t allocation,
- int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
- void *filter_data);
int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code);
int netlink_register_notifier(struct notifier_block *nb);
int netlink_unregister_notifier(struct notifier_block *nb);
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 15004c469807..5662d8be04eb 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -292,6 +292,10 @@ enum nfsstat4 {
NFS4ERR_XATTR2BIG = 10096,
};
+/* error codes for internal client use */
+#define NFS4ERR_RESET_TO_MDS 12001
+#define NFS4ERR_RESET_TO_PNFS 12002
+
static inline bool seqid_mutating_err(u32 err)
{
/* See RFC 7530, section 9.1.7 */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index b9a8b925db43..05f249f20f55 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -81,7 +81,7 @@ struct nfs_open_context {
fl_owner_t flock_owner;
struct dentry *dentry;
const struct cred *cred;
- struct rpc_cred *ll_cred; /* low-level cred - use to check for expiry */
+ struct rpc_cred __rcu *ll_cred; /* low-level cred - use to check for expiry */
struct nfs4_state *state;
fmode_t mode;
@@ -103,6 +103,7 @@ struct nfs_open_dir_context {
__be32 verf[NFS_DIR_VERIFIER_SIZE];
__u64 dir_cookie;
__u64 dup_cookie;
+ pgoff_t page_index;
signed char duped;
};
@@ -154,36 +155,39 @@ struct nfs_inode {
unsigned long attrtimeo_timestamp;
unsigned long attr_gencount;
- /* "Generation counter" for the attribute cache. This is
- * bumped whenever we update the metadata on the
- * server.
- */
- unsigned long cache_change_attribute;
struct rb_root access_cache;
struct list_head access_cache_entry_lru;
struct list_head access_cache_inode_lru;
- /*
- * This is the cookie verifier used for NFSv3 readdir
- * operations
- */
- __be32 cookieverf[NFS_DIR_VERIFIER_SIZE];
-
- atomic_long_t nrequests;
- struct nfs_mds_commit_info commit_info;
+ union {
+ /* Directory */
+ struct {
+ /* "Generation counter" for the attribute cache.
+ * This is bumped whenever we update the metadata
+ * on the server.
+ */
+ unsigned long cache_change_attribute;
+ /*
+ * This is the cookie verifier used for NFSv3 readdir
+ * operations
+ */
+ __be32 cookieverf[NFS_DIR_VERIFIER_SIZE];
+ /* Readers: in-flight sillydelete RPC calls */
+ /* Writers: rmdir */
+ struct rw_semaphore rmdir_sem;
+ };
+ /* Regular file */
+ struct {
+ atomic_long_t nrequests;
+ struct nfs_mds_commit_info commit_info;
+ struct mutex commit_mutex;
+ };
+ };
/* Open contexts for shared mmap writes */
struct list_head open_files;
- /* Readers: in-flight sillydelete RPC calls */
- /* Writers: rmdir */
- struct rw_semaphore rmdir_sem;
- struct mutex commit_mutex;
-
- /* track last access to cached pages */
- unsigned long page_index;
-
#if IS_ENABLED(CONFIG_NFS_V4)
struct nfs4_cached_acl *nfs4_acl;
/* NFSv4 state */
@@ -272,6 +276,7 @@ struct nfs4_copy_state {
#define NFS_INO_INVALIDATING (3) /* inode is being invalidated */
#define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */
#define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */
+#define NFS_INO_FORCE_READDIR (7) /* force readdirplus */
#define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */
#define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */
#define NFS_INO_LAYOUTSTATS (11) /* layoutstats inflight */
@@ -383,7 +388,7 @@ extern void nfs_zap_caches(struct inode *);
extern void nfs_set_inode_stale(struct inode *inode);
extern void nfs_invalidate_atime(struct inode *);
extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *,
- struct nfs_fattr *, struct nfs4_label *);
+ struct nfs_fattr *);
struct inode *nfs_ilookup(struct super_block *sb, struct nfs_fattr *, struct nfs_fh *);
extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *);
extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr);
@@ -404,8 +409,7 @@ extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *map
extern int nfs_revalidate_mapping_rcu(struct inode *inode);
extern int nfs_setattr(struct user_namespace *, struct dentry *, struct iattr *);
extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, struct nfs_fattr *);
-extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
- struct nfs4_label *label);
+extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr);
extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
extern void put_nfs_open_context(struct nfs_open_context *ctx);
extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, const struct cred *cred, fmode_t mode);
@@ -421,9 +425,22 @@ extern void nfs_fattr_set_barrier(struct nfs_fattr *fattr);
extern unsigned long nfs_inc_attr_generation_counter(void);
extern struct nfs_fattr *nfs_alloc_fattr(void);
+extern struct nfs_fattr *nfs_alloc_fattr_with_label(struct nfs_server *server);
+
+static inline void nfs4_label_free(struct nfs4_label *label)
+{
+#ifdef CONFIG_NFS_V4_SECURITY_LABEL
+ if (label) {
+ kfree(label->label);
+ kfree(label);
+ }
+#endif
+}
static inline void nfs_free_fattr(const struct nfs_fattr *fattr)
{
+ if (fattr)
+ nfs4_label_free(fattr->label);
kfree(fattr);
}
@@ -511,10 +528,9 @@ extern void nfs_set_verifier(struct dentry * dentry, unsigned long verf);
extern void nfs_clear_verifier_delegated(struct inode *inode);
#endif /* IS_ENABLED(CONFIG_NFS_V4) */
extern struct dentry *nfs_add_or_obtain(struct dentry *dentry,
- struct nfs_fh *fh, struct nfs_fattr *fattr,
- struct nfs4_label *label);
+ struct nfs_fh *fh, struct nfs_fattr *fattr);
extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh,
- struct nfs_fattr *fattr, struct nfs4_label *label);
+ struct nfs_fattr *fattr);
extern int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags);
extern void nfs_access_zap_cache(struct inode *inode);
extern int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res,
@@ -569,11 +585,14 @@ extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
extern int nfs_commit_inode(struct inode *, int);
extern struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail);
extern void nfs_commit_free(struct nfs_commit_data *data);
+bool nfs_commit_end(struct nfs_mds_commit_info *cinfo);
static inline int
nfs_have_writebacks(struct inode *inode)
{
- return atomic_long_read(&NFS_I(inode)->nrequests) != 0;
+ if (S_ISREG(inode->i_mode))
+ return atomic_long_read(&NFS_I(inode)->nrequests) != 0;
+ return 0;
}
/*
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index e9698b6278a5..967a0098f0a9 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -488,7 +488,6 @@ struct nfs_openres {
struct nfs4_change_info cinfo;
__u32 rflags;
struct nfs_fattr * f_attr;
- struct nfs4_label *f_label;
struct nfs_seqid * seqid;
const struct nfs_server *server;
fmode_t delegation_type;
@@ -753,7 +752,6 @@ struct nfs_entry {
int eof;
struct nfs_fh * fh;
struct nfs_fattr * fattr;
- struct nfs4_label *label;
unsigned char d_type;
struct nfs_server * server;
};
@@ -834,7 +832,6 @@ struct nfs_getaclres {
struct nfs_setattrres {
struct nfs4_sequence_res seq_res;
struct nfs_fattr * fattr;
- struct nfs4_label *label;
const struct nfs_server * server;
};
@@ -1041,7 +1038,6 @@ struct nfs4_create_res {
const struct nfs_server * server;
struct nfs_fh * fh;
struct nfs_fattr * fattr;
- struct nfs4_label *label;
struct nfs4_change_info dir_cinfo;
};
@@ -1066,7 +1062,6 @@ struct nfs4_getattr_res {
struct nfs4_sequence_res seq_res;
const struct nfs_server * server;
struct nfs_fattr * fattr;
- struct nfs4_label *label;
};
struct nfs4_link_arg {
@@ -1081,7 +1076,6 @@ struct nfs4_link_res {
struct nfs4_sequence_res seq_res;
const struct nfs_server * server;
struct nfs_fattr * fattr;
- struct nfs4_label *label;
struct nfs4_change_info cinfo;
struct nfs_fattr * dir_attr;
};
@@ -1098,7 +1092,6 @@ struct nfs4_lookup_res {
const struct nfs_server * server;
struct nfs_fattr * fattr;
struct nfs_fh * fh;
- struct nfs4_label *label;
};
struct nfs4_lookupp_arg {
@@ -1112,7 +1105,6 @@ struct nfs4_lookupp_res {
const struct nfs_server *server;
struct nfs_fattr *fattr;
struct nfs_fh *fh;
- struct nfs4_label *label;
};
struct nfs4_lookup_root_arg {
@@ -1738,15 +1730,13 @@ struct nfs_rpc_ops {
int (*submount) (struct fs_context *, struct nfs_server *);
int (*try_get_tree) (struct fs_context *);
int (*getattr) (struct nfs_server *, struct nfs_fh *,
- struct nfs_fattr *, struct nfs4_label *,
- struct inode *);
+ struct nfs_fattr *, struct inode *);
int (*setattr) (struct dentry *, struct nfs_fattr *,
struct iattr *);
int (*lookup) (struct inode *, struct dentry *,
- struct nfs_fh *, struct nfs_fattr *,
- struct nfs4_label *);
+ struct nfs_fh *, struct nfs_fattr *);
int (*lookupp) (struct inode *, struct nfs_fh *,
- struct nfs_fattr *, struct nfs4_label *);
+ struct nfs_fattr *);
int (*access) (struct inode *, struct nfs_access_entry *);
int (*readlink)(struct inode *, struct page *, unsigned int,
unsigned int);
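
The nfs_inode rework earlier in this series overlays directory-only state (cache_change_attribute, cookieverf, rmdir_sem) and regular-file-only state (nrequests, commit_info, commit_mutex) in a union, so any accessor must now be gated on the inode type, exactly as the updated nfs_have_writebacks() is. A hypothetical illustration of the pattern:

static inline unsigned long sample_dir_change_attr(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode))
		return NFS_I(inode)->cache_change_attribute;
	return 0;	/* the field aliases file-only state otherwise */
}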
diff --git a/include/linux/node.h b/include/linux/node.h
index 8e5a29897936..bb21fd631b16 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -85,7 +85,7 @@ struct node {
struct device dev;
struct list_head access_list;
-#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
+#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_HUGETLBFS)
struct work_struct node_work;
#endif
#ifdef CONFIG_HMEM_REPORTING
@@ -98,7 +98,7 @@ struct memory_block;
extern struct node *node_devices[];
typedef void (*node_registration_func_t)(struct node *);
-#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA)
+#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_NUMA)
void link_mem_sections(int nid, unsigned long start_pfn,
unsigned long end_pfn,
enum meminit_context context);
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 2a38f2b477a5..cb909edb76c4 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -7,6 +7,7 @@
#define _NVME_FC_DRIVER_H 1
#include <linux/scatterlist.h>
+#include <linux/blk-mq.h>
/*
@@ -497,6 +498,8 @@ struct nvme_fc_port_template {
int (*xmt_ls_rsp)(struct nvme_fc_local_port *localport,
struct nvme_fc_remote_port *rport,
struct nvmefc_ls_rsp *ls_rsp);
+ void (*map_queues)(struct nvme_fc_local_port *localport,
+ struct blk_mq_queue_map *map);
u32 max_hw_queues;
u16 max_sgl_segments;
@@ -779,6 +782,10 @@ struct nvmet_fc_target_port {
* LS received.
* Entrypoint is Mandatory.
*
+ * @map_queues: This function lets the driver expose the queue mapping
+ * to the block layer.
+ * Entrypoint is Optional.
+ *
* @fcp_op: Called to perform a data transfer or transmit a response.
* The nvmefc_tgt_fcp_req structure is the same LLDD-supplied
* exchange structure specified in the nvmet_fc_rcv_fcp_req() call
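
For the new optional map_queues entrypoint on the host template, an LLDD with no special affinity requirements could plausibly just defer to the block layer's generic CPU-to-queue mapping; this is an assumed minimal implementation, not one from the patch:

static void sample_fc_map_queues(struct nvme_fc_local_port *lport,
				 struct blk_mq_queue_map *map)
{
	/* A driver with IRQ affinity data would compute its own
	 * mapping here instead of taking the generic spread.
	 */
	blk_mq_map_queues(map);
}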
diff --git a/include/linux/nvme-rdma.h b/include/linux/nvme-rdma.h
index 3ec8e50efa16..4dd7e6fe92fb 100644
--- a/include/linux/nvme-rdma.h
+++ b/include/linux/nvme-rdma.h
@@ -6,6 +6,8 @@
#ifndef _LINUX_NVME_RDMA_H
#define _LINUX_NVME_RDMA_H
+#define NVME_RDMA_MAX_QUEUE_SIZE 128
+
enum nvme_rdma_cm_fmt {
NVME_RDMA_CM_FMT_1_0 = 0x0,
};
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index b7c4c4130b65..855dd9b3e84b 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -27,8 +27,20 @@
#define NVME_NSID_ALL 0xffffffff
enum nvme_subsys_type {
- NVME_NQN_DISC = 1, /* Discovery type target subsystem */
- NVME_NQN_NVME = 2, /* NVME type target subsystem */
+ /* Referral to another discovery type target subsystem */
+ NVME_NQN_DISC = 1,
+
+ /* NVME type target subsystem */
+ NVME_NQN_NVME = 2,
+
+ /* Current discovery type target subsystem */
+ NVME_NQN_CURR = 3,
+};
+
+enum nvme_ctrl_type {
+ NVME_CTRL_IO = 1, /* I/O controller */
+ NVME_CTRL_DISC = 2, /* Discovery controller */
+ NVME_CTRL_ADMIN = 3, /* Administrative controller */
};
/* Address Family codes for Discovery Log Page entry ADRFAM field */
@@ -244,7 +256,9 @@ struct nvme_id_ctrl {
__le32 rtd3e;
__le32 oaes;
__le32 ctratt;
- __u8 rsvd100[28];
+ __u8 rsvd100[11];
+ __u8 cntrltype;
+ __u8 fguid[16];
__le16 crdt1;
__le16 crdt2;
__le16 crdt3;
@@ -312,6 +326,7 @@ struct nvme_id_ctrl {
};
enum {
+ NVME_CTRL_CMIC_MULTI_PORT = 1 << 0,
NVME_CTRL_CMIC_MULTI_CTRL = 1 << 1,
NVME_CTRL_CMIC_ANA = 1 << 3,
NVME_CTRL_ONCS_COMPARE = 1 << 0,
@@ -1303,6 +1318,12 @@ struct nvmf_common_command {
#define MAX_DISC_LOGS 255
+/* Discovery log page entry flags (EFLAGS): */
+enum {
+ NVME_DISC_EFLAGS_EPCSD = (1 << 1),
+ NVME_DISC_EFLAGS_DUPRETINFO = (1 << 0),
+};
+
/* Discovery log page entry */
struct nvmf_disc_rsp_page_entry {
__u8 trtype;
@@ -1312,7 +1333,8 @@ struct nvmf_disc_rsp_page_entry {
__le16 portid;
__le16 cntlid;
__le16 asqsz;
- __u8 resv8[22];
+ __le16 eflags;
+ __u8 resv10[20];
char trsvcid[NVMF_TRSVCID_SIZE];
__u8 resv64[192];
char subnqn[NVMF_NQN_FIELD_LEN];
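
Since EFLAGS is carved out of a reserved little-endian region of the discovery log entry, consumers need a byte-order conversion before testing the new bits; a one-line hypothetical helper:

static bool sample_entry_has_epcsd(const struct nvmf_disc_rsp_page_entry *e)
{
	/* EPCSD: explicit persistent connection support for discovery */
	return le16_to_cpu(e->eflags) & NVME_DISC_EFLAGS_EPCSD;
}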
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index 104505e9028f..98efb7b5660d 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -19,6 +19,9 @@ typedef int (*nvmem_reg_read_t)(void *priv, unsigned int offset,
void *val, size_t bytes);
typedef int (*nvmem_reg_write_t)(void *priv, unsigned int offset,
void *val, size_t bytes);
+/* used for vendor-specific post-processing of cell data */
+typedef int (*nvmem_cell_post_process_t)(void *priv, const char *id, unsigned int offset,
+ void *buf, size_t bytes);
enum nvmem_type {
NVMEM_TYPE_UNKNOWN = 0,
@@ -62,6 +65,7 @@ struct nvmem_keepout {
* @no_of_node: Device should not use the parent's of_node even if it's !NULL.
* @reg_read: Callback to read data.
* @reg_write: Callback to write data.
+ * @cell_post_process: Callback for vendor-specific post-processing of cell data
* @size: Device size.
* @word_size: Minimum read/write access granularity.
* @stride: Minimum read/write access stride.
@@ -92,6 +96,7 @@ struct nvmem_config {
bool no_of_node;
nvmem_reg_read_t reg_read;
nvmem_reg_write_t reg_write;
+ nvmem_cell_post_process_t cell_post_process;
int size;
int word_size;
int stride;
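
A provider opts into the new hook by filling nvmem_config.cell_post_process before nvmem_register(). As a hedged example of the kind of vendor fixup the typedef is for, here is a hypothetical callback that reverses a MAC address cell stored in the wrong byte order; the cell id string and the transformation are illustrative only:

static int sample_cell_post_process(void *priv, const char *id,
				    unsigned int offset, void *buf,
				    size_t bytes)
{
	u8 *data = buf;
	size_t i;

	if (!id || strcmp(id, "mac-address"))
		return 0;	/* leave other cells untouched */
	for (i = 0; i < bytes / 2; i++)
		swap(data[i], data[bytes - 1 - i]);
	return 0;
}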
diff --git a/include/linux/objtool.h b/include/linux/objtool.h
index 7e72d975cb76..aca52db2f3f3 100644
--- a/include/linux/objtool.h
+++ b/include/linux/objtool.h
@@ -66,6 +66,17 @@ struct unwind_hint {
static void __used __section(".discard.func_stack_frame_non_standard") \
*__func_stack_frame_non_standard_##func = func
+/*
+ * STACK_FRAME_NON_STANDARD_FP() is a frame-pointer-specific function ignore
+ * for the case where a function is intentionally missing frame pointer setup,
+ * but otherwise needs objtool/ORC coverage when frame pointers are disabled.
+ */
+#ifdef CONFIG_FRAME_POINTER
+#define STACK_FRAME_NON_STANDARD_FP(func) STACK_FRAME_NON_STANDARD(func)
+#else
+#define STACK_FRAME_NON_STANDARD_FP(func)
+#endif
+
#else /* __ASSEMBLY__ */
/*
@@ -127,6 +138,7 @@ struct unwind_hint {
#define UNWIND_HINT(sp_reg, sp_offset, type, end) \
"\n\t"
#define STACK_FRAME_NON_STANDARD(func)
+#define STACK_FRAME_NON_STANDARD_FP(func)
#else
#define ANNOTATE_INTRA_FUNCTION_CALL
.macro UNWIND_HINT sp_reg:req sp_offset=0 type:req end=0
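
Usage of the new annotation is identical to STACK_FRAME_NON_STANDARD(); the difference is only that it compiles away in non-frame-pointer builds, where objtool/ORC can still validate the function. A hypothetical example:

/* Deliberately omits frame pointer setup; tell objtool to ignore it
 * only when CONFIG_FRAME_POINTER is set.
 */
void sample_trampoline(void);
STACK_FRAME_NON_STANDARD_FP(sample_trampoline);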
diff --git a/include/linux/of.h b/include/linux/of.h
index 6f1c41f109bb..ff143a027abc 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -185,7 +185,7 @@ static inline bool of_node_is_root(const struct device_node *node)
return node && (node->parent == NULL);
}
-static inline int of_node_check_flag(struct device_node *n, unsigned long flag)
+static inline int of_node_check_flag(const struct device_node *n, unsigned long flag)
{
return test_bit(flag, &n->_flags);
}
@@ -353,6 +353,7 @@ extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
extern struct device_node *of_get_next_cpu_node(struct device_node *prev);
extern struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
int index);
+extern u64 of_get_cpu_hwid(struct device_node *cpun, unsigned int thread);
#define for_each_property_of_node(dn, pp) \
for (pp = dn->properties; pp != NULL; pp = pp->next)
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index cf6a65b94d40..cf48983d3c86 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -65,7 +65,6 @@ extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
extern int early_init_dt_scan_chosen_stdout(void);
extern void early_init_fdt_scan_reserved_mem(void);
extern void early_init_fdt_reserve_self(void);
-extern void __init early_init_dt_scan_chosen_arch(unsigned long node);
extern void early_init_dt_add_memory_arch(u64 base, u64 size);
extern u64 dt_mem_next_cell(int s, const __be32 **cellp);
diff --git a/include/linux/of_net.h b/include/linux/of_net.h
index daef3b0d9270..0484b613ca64 100644
--- a/include/linux/of_net.h
+++ b/include/linux/of_net.h
@@ -8,12 +8,13 @@
#include <linux/phy.h>
-#ifdef CONFIG_OF_NET
+#if defined(CONFIG_OF) && defined(CONFIG_NET)
#include <linux/of.h>
struct net_device;
extern int of_get_phy_mode(struct device_node *np, phy_interface_t *interface);
extern int of_get_mac_address(struct device_node *np, u8 *mac);
+int of_get_ethdev_address(struct device_node *np, struct net_device *dev);
extern struct net_device *of_find_net_device_by_node(struct device_node *np);
#else
static inline int of_get_phy_mode(struct device_node *np,
@@ -27,6 +28,11 @@ static inline int of_get_mac_address(struct device_node *np, u8 *mac)
return -ENODEV;
}
+static inline int of_get_ethdev_address(struct device_node *np, struct net_device *dev)
+{
+ return -ENODEV;
+}
+
static inline struct net_device *of_find_net_device_by_node(struct device_node *np)
{
return NULL;
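
The new of_get_ethdev_address() saves drivers the dance of reading the MAC into a temporary buffer and copying it into the netdev. A sketch of the intended call site, with a hypothetical probe function and fallback policy:

static int sample_probe(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	/* Writes the DT MAC address straight into the netdev;
	 * fall back to a random address if the property is absent.
	 */
	if (of_get_ethdev_address(pdev->dev.of_node, ndev))
		eth_hw_addr_random(ndev);
	return 0;
}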
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index a558d67ee86f..52ec4b5e5615 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -143,6 +143,8 @@ enum pageflags {
#endif
__NR_PAGEFLAGS,
+ PG_readahead = PG_reclaim,
+
/* Filesystems */
PG_checked = PG_owner_priv_1,
@@ -171,6 +173,15 @@ enum pageflags {
/* Compound pages. Stored in first tail page's flags */
PG_double_map = PG_workingset,
+#ifdef CONFIG_MEMORY_FAILURE
+ /*
+ * Compound pages. Stored in first tail page's flags.
+ * Indicates that at least one subpage is hwpoisoned in the
+ * THP.
+ */
+ PG_has_hwpoisoned = PG_mappedtodisk,
+#endif
+
/* non-lru isolated movable page */
PG_isolated = PG_reclaim,
@@ -193,6 +204,34 @@ static inline unsigned long _compound_head(const struct page *page)
#define compound_head(page) ((typeof(page))_compound_head(page))
+/**
+ * page_folio - Converts from page to folio.
+ * @p: The page.
+ *
+ * Every page is part of a folio. This function cannot be called on a
+ * NULL pointer.
+ *
+ * Context: Neither a reference nor a lock is required on @p. If the
+ * caller does not hold a reference, this call may race with a folio
+ * split, so it should re-check that the folio still contains this page
+ * after gaining a reference on the folio.
+ * Return: The folio which contains this page.
+ */
+#define page_folio(p) (_Generic((p), \
+ const struct page *: (const struct folio *)_compound_head(p), \
+ struct page *: (struct folio *)_compound_head(p)))
+
+/**
+ * folio_page - Return a page from a folio.
+ * @folio: The folio.
+ * @n: The page number to return.
+ *
+ * @n is relative to the start of the folio. This function does not
+ * check that the page number lies within @folio; the caller is presumed
+ * to have a reference to the page.
+ */
+#define folio_page(folio, n) nth_page(&(folio)->page, n)
+
static __always_inline int PageTail(struct page *page)
{
return READ_ONCE(page->compound_head) & 1;
@@ -206,7 +245,7 @@ static __always_inline int PageCompound(struct page *page)
#define PAGE_POISON_PATTERN -1l
static inline int PagePoisoned(const struct page *page)
{
- return page->flags == PAGE_POISON_PATTERN;
+ return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
}
#ifdef CONFIG_DEBUG_VM
@@ -217,6 +256,15 @@ static inline void page_init_poison(struct page *page, size_t size)
}
#endif
+static unsigned long *folio_flags(struct folio *folio, unsigned n)
+{
+ struct page *page = &folio->page;
+
+ VM_BUG_ON_PGFLAGS(PageTail(page), page);
+ VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
+ return &page[n].flags;
+}
+
/*
* Page flags policies wrt compound pages
*
@@ -261,36 +309,64 @@ static inline void page_init_poison(struct page *page, size_t size)
VM_BUG_ON_PGFLAGS(!PageHead(page), page); \
PF_POISONED_CHECK(&page[1]); })
+/* Which page is the flag stored in */
+#define FOLIO_PF_ANY 0
+#define FOLIO_PF_HEAD 0
+#define FOLIO_PF_ONLY_HEAD 0
+#define FOLIO_PF_NO_TAIL 0
+#define FOLIO_PF_NO_COMPOUND 0
+#define FOLIO_PF_SECOND 1
+
/*
* Macros to create function definitions for page flags
*/
#define TESTPAGEFLAG(uname, lname, policy) \
+static __always_inline bool folio_test_##lname(struct folio *folio) \
+{ return test_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline int Page##uname(struct page *page) \
- { return test_bit(PG_##lname, &policy(page, 0)->flags); }
+{ return test_bit(PG_##lname, &policy(page, 0)->flags); }
#define SETPAGEFLAG(uname, lname, policy) \
+static __always_inline \
+void folio_set_##lname(struct folio *folio) \
+{ set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline void SetPage##uname(struct page *page) \
- { set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ set_bit(PG_##lname, &policy(page, 1)->flags); }
#define CLEARPAGEFLAG(uname, lname, policy) \
+static __always_inline \
+void folio_clear_##lname(struct folio *folio) \
+{ clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline void ClearPage##uname(struct page *page) \
- { clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ clear_bit(PG_##lname, &policy(page, 1)->flags); }
#define __SETPAGEFLAG(uname, lname, policy) \
+static __always_inline \
+void __folio_set_##lname(struct folio *folio) \
+{ __set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline void __SetPage##uname(struct page *page) \
- { __set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ __set_bit(PG_##lname, &policy(page, 1)->flags); }
#define __CLEARPAGEFLAG(uname, lname, policy) \
+static __always_inline \
+void __folio_clear_##lname(struct folio *folio) \
+{ __clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline void __ClearPage##uname(struct page *page) \
- { __clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }
#define TESTSETFLAG(uname, lname, policy) \
+static __always_inline \
+bool folio_test_set_##lname(struct folio *folio) \
+{ return test_and_set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline int TestSetPage##uname(struct page *page) \
- { return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
#define TESTCLEARFLAG(uname, lname, policy) \
+static __always_inline \
+bool folio_test_clear_##lname(struct folio *folio) \
+{ return test_and_clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline int TestClearPage##uname(struct page *page) \
- { return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
#define PAGEFLAG(uname, lname, policy) \
TESTPAGEFLAG(uname, lname, policy) \
@@ -306,29 +382,37 @@ static __always_inline int TestClearPage##uname(struct page *page) \
TESTSETFLAG(uname, lname, policy) \
TESTCLEARFLAG(uname, lname, policy)
-#define TESTPAGEFLAG_FALSE(uname) \
+#define TESTPAGEFLAG_FALSE(uname, lname) \
+static inline bool folio_test_##lname(const struct folio *folio) { return 0; } \
static inline int Page##uname(const struct page *page) { return 0; }
-#define SETPAGEFLAG_NOOP(uname) \
+#define SETPAGEFLAG_NOOP(uname, lname) \
+static inline void folio_set_##lname(struct folio *folio) { } \
static inline void SetPage##uname(struct page *page) { }
-#define CLEARPAGEFLAG_NOOP(uname) \
+#define CLEARPAGEFLAG_NOOP(uname, lname) \
+static inline void folio_clear_##lname(struct folio *folio) { } \
static inline void ClearPage##uname(struct page *page) { }
-#define __CLEARPAGEFLAG_NOOP(uname) \
+#define __CLEARPAGEFLAG_NOOP(uname, lname) \
+static inline void __folio_clear_##lname(struct folio *folio) { } \
static inline void __ClearPage##uname(struct page *page) { }
-#define TESTSETFLAG_FALSE(uname) \
+#define TESTSETFLAG_FALSE(uname, lname) \
+static inline bool folio_test_set_##lname(struct folio *folio) \
+{ return 0; } \
static inline int TestSetPage##uname(struct page *page) { return 0; }
-#define TESTCLEARFLAG_FALSE(uname) \
+#define TESTCLEARFLAG_FALSE(uname, lname) \
+static inline bool folio_test_clear_##lname(struct folio *folio) \
+{ return 0; } \
static inline int TestClearPage##uname(struct page *page) { return 0; }
-#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname) \
- SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)
+#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname) \
+ SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)
-#define TESTSCFLAG_FALSE(uname) \
- TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)
+#define TESTSCFLAG_FALSE(uname, lname) \
+ TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)
__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
@@ -384,8 +468,8 @@ PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)
/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
-PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
- TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)
+PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND)
+ TESTCLEARFLAG(Readahead, readahead, PF_NO_COMPOUND)
#ifdef CONFIG_HIGHMEM
/*
@@ -394,22 +478,25 @@ PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
*/
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
-PAGEFLAG_FALSE(HighMem)
+PAGEFLAG_FALSE(HighMem, highmem)
#endif
#ifdef CONFIG_SWAP
-static __always_inline int PageSwapCache(struct page *page)
+static __always_inline bool folio_test_swapcache(struct folio *folio)
{
-#ifdef CONFIG_THP_SWAP
- page = compound_head(page);
-#endif
- return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
+ return folio_test_swapbacked(folio) &&
+ test_bit(PG_swapcache, folio_flags(folio, 0));
+}
+static __always_inline bool PageSwapCache(struct page *page)
+{
+ return folio_test_swapcache(page_folio(page));
}
+
SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
-PAGEFLAG_FALSE(SwapCache)
+PAGEFLAG_FALSE(SwapCache, swapcache)
#endif
PAGEFLAG(Unevictable, unevictable, PF_HEAD)
@@ -421,14 +508,14 @@ PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
-PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
- TESTSCFLAG_FALSE(Mlocked)
+PAGEFLAG_FALSE(Mlocked, mlocked) __CLEARPAGEFLAG_NOOP(Mlocked, mlocked)
+ TESTSCFLAG_FALSE(Mlocked, mlocked)
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
-PAGEFLAG_FALSE(Uncached)
+PAGEFLAG_FALSE(Uncached, uncached)
#endif
#ifdef CONFIG_MEMORY_FAILURE
@@ -437,7 +524,7 @@ TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
extern bool take_page_off_buddy(struct page *page);
#else
-PAGEFLAG_FALSE(HWPoison)
+PAGEFLAG_FALSE(HWPoison, hwpoison)
#define __PG_HWPOISON 0
#endif
@@ -451,7 +538,7 @@ PAGEFLAG(Idle, idle, PF_ANY)
#ifdef CONFIG_KASAN_HW_TAGS
PAGEFLAG(SkipKASanPoison, skip_kasan_poison, PF_HEAD)
#else
-PAGEFLAG_FALSE(SkipKASanPoison)
+PAGEFLAG_FALSE(SkipKASanPoison, skip_kasan_poison)
#endif
/*
@@ -489,10 +576,14 @@ static __always_inline int PageMappingFlags(struct page *page)
return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}
-static __always_inline int PageAnon(struct page *page)
+static __always_inline bool folio_test_anon(struct folio *folio)
+{
+ return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
+}
+
+static __always_inline bool PageAnon(struct page *page)
{
- page = compound_head(page);
- return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
+ return folio_test_anon(page_folio(page));
}
static __always_inline int __PageMovable(struct page *page)
@@ -508,30 +599,32 @@ static __always_inline int __PageMovable(struct page *page)
* is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
* anon_vma, but to that page's node of the stable tree.
*/
-static __always_inline int PageKsm(struct page *page)
+static __always_inline bool folio_test_ksm(struct folio *folio)
{
- page = compound_head(page);
- return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
+ return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
PAGE_MAPPING_KSM;
}
+
+static __always_inline bool PageKsm(struct page *page)
+{
+ return folio_test_ksm(page_folio(page));
+}
#else
-TESTPAGEFLAG_FALSE(Ksm)
+TESTPAGEFLAG_FALSE(Ksm, ksm)
#endif
u64 stable_page_flags(struct page *page);
-static inline int PageUptodate(struct page *page)
+static inline bool folio_test_uptodate(struct folio *folio)
{
- int ret;
- page = compound_head(page);
- ret = test_bit(PG_uptodate, &(page)->flags);
+ bool ret = test_bit(PG_uptodate, folio_flags(folio, 0));
/*
- * Must ensure that the data we read out of the page is loaded
- * _after_ we've loaded page->flags to check for PageUptodate.
- * We can skip the barrier if the page is not uptodate, because
+ * Must ensure that the data we read out of the folio is loaded
+ * _after_ we've loaded folio->flags to check the uptodate bit.
+ * We can skip the barrier if the folio is not uptodate, because
* we wouldn't be reading anything from it.
*
- * See SetPageUptodate() for the other side of the story.
+ * See folio_mark_uptodate() for the other side of the story.
*/
if (ret)
smp_rmb();
@@ -539,47 +632,71 @@ static inline int PageUptodate(struct page *page)
return ret;
}
-static __always_inline void __SetPageUptodate(struct page *page)
+static inline int PageUptodate(struct page *page)
+{
+ return folio_test_uptodate(page_folio(page));
+}
+
+static __always_inline void __folio_mark_uptodate(struct folio *folio)
{
- VM_BUG_ON_PAGE(PageTail(page), page);
smp_wmb();
- __set_bit(PG_uptodate, &page->flags);
+ __set_bit(PG_uptodate, folio_flags(folio, 0));
}
-static __always_inline void SetPageUptodate(struct page *page)
+static __always_inline void folio_mark_uptodate(struct folio *folio)
{
- VM_BUG_ON_PAGE(PageTail(page), page);
/*
* Memory barrier must be issued before setting the PG_uptodate bit,
- * so that all previous stores issued in order to bring the page
- * uptodate are actually visible before PageUptodate becomes true.
+ * so that all previous stores issued in order to bring the folio
+ * uptodate are actually visible before folio_test_uptodate becomes true.
*/
smp_wmb();
- set_bit(PG_uptodate, &page->flags);
+ set_bit(PG_uptodate, folio_flags(folio, 0));
+}
+
+static __always_inline void __SetPageUptodate(struct page *page)
+{
+ __folio_mark_uptodate((struct folio *)page);
+}
+
+static __always_inline void SetPageUptodate(struct page *page)
+{
+ folio_mark_uptodate((struct folio *)page);
}
CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
-int test_clear_page_writeback(struct page *page);
-int __test_set_page_writeback(struct page *page, bool keep_write);
+bool __folio_start_writeback(struct folio *folio, bool keep_write);
+bool set_page_writeback(struct page *page);
-#define test_set_page_writeback(page) \
- __test_set_page_writeback(page, false)
-#define test_set_page_writeback_keepwrite(page) \
- __test_set_page_writeback(page, true)
+#define folio_start_writeback(folio) \
+ __folio_start_writeback(folio, false)
+#define folio_start_writeback_keepwrite(folio) \
+ __folio_start_writeback(folio, true)
-static inline void set_page_writeback(struct page *page)
+static inline void set_page_writeback_keepwrite(struct page *page)
{
- test_set_page_writeback(page);
+ folio_start_writeback_keepwrite(page_folio(page));
}
-static inline void set_page_writeback_keepwrite(struct page *page)
+static inline bool test_set_page_writeback(struct page *page)
{
- test_set_page_writeback_keepwrite(page);
+ return set_page_writeback(page);
}
__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)
+/* Whether there are one or multiple pages in a folio */
+static inline bool folio_test_single(struct folio *folio)
+{
+ return !folio_test_head(folio);
+}
+
+static inline bool folio_test_multi(struct folio *folio)
+{
+ return folio_test_head(folio);
+}
+
static __always_inline void set_compound_head(struct page *page, struct page *head)
{
WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
@@ -603,12 +720,15 @@ static inline void ClearPageCompound(struct page *page)
#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
+static inline bool folio_test_hugetlb(struct folio *folio)
+{
+ return PageHeadHuge(&folio->page);
+}
#else
-TESTPAGEFLAG_FALSE(Huge)
-TESTPAGEFLAG_FALSE(HeadHuge)
+TESTPAGEFLAG_FALSE(Huge, hugetlb)
+TESTPAGEFLAG_FALSE(HeadHuge, headhuge)
#endif
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* PageHuge() only returns true for hugetlbfs pages, but not for
@@ -624,6 +744,11 @@ static inline int PageTransHuge(struct page *page)
return PageHead(page);
}
+static inline bool folio_test_transhuge(struct folio *folio)
+{
+ return folio_test_head(folio);
+}
+
/*
* PageTransCompound returns true for both transparent huge pages
* and hugetlbfs pages, so it should only be called when it's known
@@ -660,12 +785,26 @@ static inline int PageTransTail(struct page *page)
PAGEFLAG(DoubleMap, double_map, PF_SECOND)
TESTSCFLAG(DoubleMap, double_map, PF_SECOND)
#else
-TESTPAGEFLAG_FALSE(TransHuge)
-TESTPAGEFLAG_FALSE(TransCompound)
-TESTPAGEFLAG_FALSE(TransCompoundMap)
-TESTPAGEFLAG_FALSE(TransTail)
-PAGEFLAG_FALSE(DoubleMap)
- TESTSCFLAG_FALSE(DoubleMap)
+TESTPAGEFLAG_FALSE(TransHuge, transhuge)
+TESTPAGEFLAG_FALSE(TransCompound, transcompound)
+TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap)
+TESTPAGEFLAG_FALSE(TransTail, transtail)
+PAGEFLAG_FALSE(DoubleMap, double_map)
+ TESTSCFLAG_FALSE(DoubleMap, double_map)
+#endif
+
+#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
+/*
+ * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
+ * compound page.
+ *
+ * This flag is set by the hwpoison handler. Cleared by THP split or page free.
+ */
+PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
+ TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
+#else
+PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
+ TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#endif
/*
@@ -849,6 +988,11 @@ static inline int page_has_private(struct page *page)
return !!(page->flags & PAGE_FLAGS_PRIVATE);
}
+static inline bool folio_has_private(struct folio *folio)
+{
+ return page_has_private(&folio->page);
+}
+
#undef PF_ANY
#undef PF_HEAD
#undef PF_ONLY_HEAD
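
The net effect of the macro changes above is that each flag gains a folio_test/set/clear_* accessor which touches the head page exactly once, while the legacy Page* API becomes a thin wrapper via page_folio(). A hypothetical caller showing the preferred pattern of converting once and reusing the folio:

static bool sample_ready_for_read(struct page *page)
{
	struct folio *folio = page_folio(page);

	/* One compound_head() lookup instead of one per flag test. */
	return folio_test_uptodate(folio) && !folio_test_locked(folio);
}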
diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h
index d8a6aecf99cb..83abf95e9fa7 100644
--- a/include/linux/page_idle.h
+++ b/include/linux/page_idle.h
@@ -8,46 +8,16 @@
#ifdef CONFIG_PAGE_IDLE_FLAG
-#ifdef CONFIG_64BIT
-static inline bool page_is_young(struct page *page)
-{
- return PageYoung(page);
-}
-
-static inline void set_page_young(struct page *page)
-{
- SetPageYoung(page);
-}
-
-static inline bool test_and_clear_page_young(struct page *page)
-{
- return TestClearPageYoung(page);
-}
-
-static inline bool page_is_idle(struct page *page)
-{
- return PageIdle(page);
-}
-
-static inline void set_page_idle(struct page *page)
-{
- SetPageIdle(page);
-}
-
-static inline void clear_page_idle(struct page *page)
-{
- ClearPageIdle(page);
-}
-#else /* !CONFIG_64BIT */
+#ifndef CONFIG_64BIT
/*
* If there is not enough space to store Idle and Young bits in page flags, use
* page ext flags instead.
*/
extern struct page_ext_operations page_idle_ops;
-static inline bool page_is_young(struct page *page)
+static inline bool folio_test_young(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = lookup_page_ext(&folio->page);
if (unlikely(!page_ext))
return false;
@@ -55,9 +25,9 @@ static inline bool page_is_young(struct page *page)
return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
}
-static inline void set_page_young(struct page *page)
+static inline void folio_set_young(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = lookup_page_ext(&folio->page);
if (unlikely(!page_ext))
return;
@@ -65,9 +35,9 @@ static inline void set_page_young(struct page *page)
set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
}
-static inline bool test_and_clear_page_young(struct page *page)
+static inline bool folio_test_clear_young(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = lookup_page_ext(&folio->page);
if (unlikely(!page_ext))
return false;
@@ -75,9 +45,9 @@ static inline bool test_and_clear_page_young(struct page *page)
return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
}
-static inline bool page_is_idle(struct page *page)
+static inline bool folio_test_idle(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = lookup_page_ext(&folio->page);
if (unlikely(!page_ext))
return false;
@@ -85,9 +55,9 @@ static inline bool page_is_idle(struct page *page)
return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
}
-static inline void set_page_idle(struct page *page)
+static inline void folio_set_idle(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = lookup_page_ext(&folio->page);
if (unlikely(!page_ext))
return;
@@ -95,46 +65,75 @@ static inline void set_page_idle(struct page *page)
set_bit(PAGE_EXT_IDLE, &page_ext->flags);
}
-static inline void clear_page_idle(struct page *page)
+static inline void folio_clear_idle(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = lookup_page_ext(&folio->page);
if (unlikely(!page_ext))
return;
clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
}
-#endif /* CONFIG_64BIT */
+#endif /* !CONFIG_64BIT */
#else /* !CONFIG_PAGE_IDLE_FLAG */
-static inline bool page_is_young(struct page *page)
+static inline bool folio_test_young(struct folio *folio)
{
return false;
}
-static inline void set_page_young(struct page *page)
+static inline void folio_set_young(struct folio *folio)
{
}
-static inline bool test_and_clear_page_young(struct page *page)
+static inline bool folio_test_clear_young(struct folio *folio)
{
return false;
}
-static inline bool page_is_idle(struct page *page)
+static inline bool folio_test_idle(struct folio *folio)
{
return false;
}
-static inline void set_page_idle(struct page *page)
+static inline void folio_set_idle(struct folio *folio)
{
}
-static inline void clear_page_idle(struct page *page)
+static inline void folio_clear_idle(struct folio *folio)
{
}
#endif /* CONFIG_PAGE_IDLE_FLAG */
+static inline bool page_is_young(struct page *page)
+{
+ return folio_test_young(page_folio(page));
+}
+
+static inline void set_page_young(struct page *page)
+{
+ folio_set_young(page_folio(page));
+}
+
+static inline bool test_and_clear_page_young(struct page *page)
+{
+ return folio_test_clear_young(page_folio(page));
+}
+
+static inline bool page_is_idle(struct page *page)
+{
+ return folio_test_idle(page_folio(page));
+}
+
+static inline void set_page_idle(struct page *page)
+{
+ folio_set_idle(page_folio(page));
+}
+
+static inline void clear_page_idle(struct page *page)
+{
+ folio_clear_idle(page_folio(page));
+}
#endif /* _LINUX_MM_PAGE_IDLE_H */
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index 719bfe5108c5..119a0c9d2a8b 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -8,24 +8,24 @@
extern struct static_key_false page_owner_inited;
extern struct page_ext_operations page_owner_ops;
-extern void __reset_page_owner(struct page *page, unsigned int order);
+extern void __reset_page_owner(struct page *page, unsigned short order);
extern void __set_page_owner(struct page *page,
- unsigned int order, gfp_t gfp_mask);
+ unsigned short order, gfp_t gfp_mask);
extern void __split_page_owner(struct page *page, unsigned int nr);
-extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
+extern void __folio_copy_owner(struct folio *newfolio, struct folio *old);
extern void __set_page_owner_migrate_reason(struct page *page, int reason);
extern void __dump_page_owner(const struct page *page);
extern void pagetypeinfo_showmixedcount_print(struct seq_file *m,
pg_data_t *pgdat, struct zone *zone);
-static inline void reset_page_owner(struct page *page, unsigned int order)
+static inline void reset_page_owner(struct page *page, unsigned short order)
{
if (static_branch_unlikely(&page_owner_inited))
__reset_page_owner(page, order);
}
static inline void set_page_owner(struct page *page,
- unsigned int order, gfp_t gfp_mask)
+ unsigned short order, gfp_t gfp_mask)
{
if (static_branch_unlikely(&page_owner_inited))
__set_page_owner(page, order, gfp_mask);
@@ -36,10 +36,10 @@ static inline void split_page_owner(struct page *page, unsigned int nr)
if (static_branch_unlikely(&page_owner_inited))
__split_page_owner(page, nr);
}
-static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
+static inline void folio_copy_owner(struct folio *newfolio, struct folio *old)
{
if (static_branch_unlikely(&page_owner_inited))
- __copy_page_owner(oldpage, newpage);
+ __folio_copy_owner(newfolio, old);
}
static inline void set_page_owner_migrate_reason(struct page *page, int reason)
{
@@ -52,7 +52,7 @@ static inline void dump_page_owner(const struct page *page)
__dump_page_owner(page);
}
#else
-static inline void reset_page_owner(struct page *page, unsigned int order)
+static inline void reset_page_owner(struct page *page, unsigned short order)
{
}
static inline void set_page_owner(struct page *page,
@@ -60,10 +60,10 @@ static inline void set_page_owner(struct page *page,
{
}
static inline void split_page_owner(struct page *page,
- unsigned int order)
+ unsigned short order)
{
}
-static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
+static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio)
{
}
static inline void set_page_owner_migrate_reason(struct page *page, int reason)
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index 7ad46f45df39..2e677e6ad09f 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -67,9 +67,31 @@ static inline int page_ref_count(const struct page *page)
return atomic_read(&page->_refcount);
}
+/**
+ * folio_ref_count - The reference count on this folio.
+ * @folio: The folio.
+ *
+ * The refcount is usually incremented by calls to folio_get() and
+ * decremented by calls to folio_put(). Some typical users of the
+ * folio refcount:
+ *
+ * - Each reference from a page table
+ * - The page cache
+ * - Filesystem private data
+ * - The LRU list
+ * - Pipes
+ * - Direct IO which references this page in the process address space
+ *
+ * Return: The number of references to this folio.
+ */
+static inline int folio_ref_count(const struct folio *folio)
+{
+ return page_ref_count(&folio->page);
+}
+
static inline int page_count(const struct page *page)
{
- return atomic_read(&compound_head(page)->_refcount);
+ return folio_ref_count(page_folio(page));
}
static inline void set_page_count(struct page *page, int v)
@@ -79,6 +101,11 @@ static inline void set_page_count(struct page *page, int v)
__page_ref_set(page, v);
}
+static inline void folio_set_count(struct folio *folio, int v)
+{
+ set_page_count(&folio->page, v);
+}
+
/*
* Setup the page count before being freed into the page allocator for
* the first time (boot or memory hotplug)
@@ -95,6 +122,11 @@ static inline void page_ref_add(struct page *page, int nr)
__page_ref_mod(page, nr);
}
+static inline void folio_ref_add(struct folio *folio, int nr)
+{
+ page_ref_add(&folio->page, nr);
+}
+
static inline void page_ref_sub(struct page *page, int nr)
{
atomic_sub(nr, &page->_refcount);
@@ -102,6 +134,11 @@ static inline void page_ref_sub(struct page *page, int nr)
__page_ref_mod(page, -nr);
}
+static inline void folio_ref_sub(struct folio *folio, int nr)
+{
+ page_ref_sub(&folio->page, nr);
+}
+
static inline int page_ref_sub_return(struct page *page, int nr)
{
int ret = atomic_sub_return(nr, &page->_refcount);
@@ -111,6 +148,11 @@ static inline int page_ref_sub_return(struct page *page, int nr)
return ret;
}
+static inline int folio_ref_sub_return(struct folio *folio, int nr)
+{
+ return page_ref_sub_return(&folio->page, nr);
+}
+
static inline void page_ref_inc(struct page *page)
{
atomic_inc(&page->_refcount);
@@ -118,6 +160,11 @@ static inline void page_ref_inc(struct page *page)
__page_ref_mod(page, 1);
}
+static inline void folio_ref_inc(struct folio *folio)
+{
+ page_ref_inc(&folio->page);
+}
+
static inline void page_ref_dec(struct page *page)
{
atomic_dec(&page->_refcount);
@@ -125,6 +172,11 @@ static inline void page_ref_dec(struct page *page)
__page_ref_mod(page, -1);
}
+static inline void folio_ref_dec(struct folio *folio)
+{
+ page_ref_dec(&folio->page);
+}
+
static inline int page_ref_sub_and_test(struct page *page, int nr)
{
int ret = atomic_sub_and_test(nr, &page->_refcount);
@@ -134,6 +186,11 @@ static inline int page_ref_sub_and_test(struct page *page, int nr)
return ret;
}
+static inline int folio_ref_sub_and_test(struct folio *folio, int nr)
+{
+ return page_ref_sub_and_test(&folio->page, nr);
+}
+
static inline int page_ref_inc_return(struct page *page)
{
int ret = atomic_inc_return(&page->_refcount);
@@ -143,6 +200,11 @@ static inline int page_ref_inc_return(struct page *page)
return ret;
}
+static inline int folio_ref_inc_return(struct folio *folio)
+{
+ return page_ref_inc_return(&folio->page);
+}
+
static inline int page_ref_dec_and_test(struct page *page)
{
int ret = atomic_dec_and_test(&page->_refcount);
@@ -152,6 +214,11 @@ static inline int page_ref_dec_and_test(struct page *page)
return ret;
}
+static inline int folio_ref_dec_and_test(struct folio *folio)
+{
+ return page_ref_dec_and_test(&folio->page);
+}
+
static inline int page_ref_dec_return(struct page *page)
{
int ret = atomic_dec_return(&page->_refcount);
@@ -161,15 +228,91 @@ static inline int page_ref_dec_return(struct page *page)
return ret;
}
-static inline int page_ref_add_unless(struct page *page, int nr, int u)
+static inline int folio_ref_dec_return(struct folio *folio)
+{
+ return page_ref_dec_return(&folio->page);
+}
+
+static inline bool page_ref_add_unless(struct page *page, int nr, int u)
{
- int ret = atomic_add_unless(&page->_refcount, nr, u);
+ bool ret = atomic_add_unless(&page->_refcount, nr, u);
if (page_ref_tracepoint_active(page_ref_mod_unless))
__page_ref_mod_unless(page, nr, ret);
return ret;
}
+static inline bool folio_ref_add_unless(struct folio *folio, int nr, int u)
+{
+ return page_ref_add_unless(&folio->page, nr, u);
+}
+
+/**
+ * folio_try_get - Attempt to increase the refcount on a folio.
+ * @folio: The folio.
+ *
+ * If you do not already have a reference to a folio, you can attempt to
+ * get one using this function. It may fail if, for example, the folio
+ * has been freed since you found a pointer to it, or it is frozen for
+ * the purposes of splitting or migration.
+ *
+ * Return: True if the reference count was successfully incremented.
+ */
+static inline bool folio_try_get(struct folio *folio)
+{
+ return folio_ref_add_unless(folio, 1, 0);
+}
+
+static inline bool folio_ref_try_add_rcu(struct folio *folio, int count)
+{
+#ifdef CONFIG_TINY_RCU
+ /*
+ * The caller guarantees the folio will not be freed from interrupt
+ * context, so (on !SMP) we only need preemption to be disabled
+ * and TINY_RCU does that for us.
+ */
+# ifdef CONFIG_PREEMPT_COUNT
+ VM_BUG_ON(!in_atomic() && !irqs_disabled());
+# endif
+ VM_BUG_ON_FOLIO(folio_ref_count(folio) == 0, folio);
+ folio_ref_add(folio, count);
+#else
+ if (unlikely(!folio_ref_add_unless(folio, count, 0))) {
+ /* Either the folio has been freed, or will be freed. */
+ return false;
+ }
+#endif
+ return true;
+}
+
+/**
+ * folio_try_get_rcu - Attempt to increase the refcount on a folio.
+ * @folio: The folio.
+ *
+ * This is a version of folio_try_get() optimised for non-SMP kernels.
+ * If you are still holding the rcu_read_lock() after looking up the
+ * page and know that the page cannot have its refcount decreased to
+ * zero in interrupt context, you can use this instead of folio_try_get().
+ *
+ * Example users include get_user_pages_fast() (as pages are not unmapped
+ * from interrupt context) and the page cache lookups (as pages are not
+ * truncated from interrupt context). We also know that pages are not
+ * frozen in interrupt context for the purposes of splitting or migration.
+ *
+ * You can also use this function if you're holding a lock that prevents
+ * pages being frozen & removed; eg the i_pages lock for the page cache
+ * or the mmap_sem or page table lock for page tables. In this case,
+ * it will always succeed, and you could have used a plain folio_get(),
+ * but it's sometimes more convenient to have a common function called
+ * from both locked and RCU-protected contexts.
+ *
+ * Return: True if the reference count was successfully incremented.
+ */
+static inline bool folio_try_get_rcu(struct folio *folio)
+{
+ return folio_ref_try_add_rcu(folio, 1);
+}
+
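A hedged sketch of the find/increment/re-check protocol that folio_try_get() and folio_try_get_rcu() exist for; the raw xa_load() stands in for the real page cache lookup helpers, and shadow (value) entries are ignored for brevity:

static struct folio *lookup_and_pin(struct address_space *mapping,
				    pgoff_t index)
{
	struct folio *folio;

	rcu_read_lock();
repeat:
	folio = xa_load(&mapping->i_pages, index);
	if (folio) {
		if (!folio_try_get_rcu(folio))
			goto repeat;		/* being freed; look again */
		if (unlikely(folio != xa_load(&mapping->i_pages, index))) {
			folio_put(folio);	/* raced with removal */
			goto repeat;
		}
	}
	rcu_read_unlock();
	return folio;
}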
static inline int page_ref_freeze(struct page *page, int count)
{
int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);
@@ -179,6 +322,11 @@ static inline int page_ref_freeze(struct page *page, int count)
return ret;
}
+static inline int folio_ref_freeze(struct folio *folio, int count)
+{
+ return page_ref_freeze(&folio->page, count);
+}
+
static inline void page_ref_unfreeze(struct page *page, int count)
{
VM_BUG_ON_PAGE(page_count(page) != 0, page);
@@ -189,4 +337,8 @@ static inline void page_ref_unfreeze(struct page *page, int count)
__page_ref_unfreeze(page, count);
}
+static inline void folio_ref_unfreeze(struct folio *folio, int count)
+{
+ page_ref_unfreeze(&folio->page, count);
+}
#endif
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 62db6b0176b9..1a0c646eb6ff 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -24,6 +24,56 @@ static inline bool mapping_empty(struct address_space *mapping)
}
/*
+ * mapping_shrinkable - test if page cache state allows inode reclaim
+ * @mapping: the page cache mapping
+ *
+ * This checks the mapping's cache state for the purpose of inode
+ * reclaim and LRU management.
+ *
+ * The caller is expected to hold the i_lock, but is not required to
+ * hold the i_pages lock, which usually protects cache state. That's
+ * because the i_lock and the list_lru lock that protect the inode and
+ * its LRU state don't nest inside the irq-safe i_pages lock.
+ *
+ * Cache deletions are performed under the i_lock, which ensures that
+ * when an inode goes empty, it will reliably get queued on the LRU.
+ *
+ * Cache additions do not acquire the i_lock and may race with this
+ * check, in which case we'll report the inode as shrinkable when it
+ * has cache pages. This is okay: the shrinker also checks the
+ * refcount and the referenced bit, which will be elevated or set in
+ * the process of adding new cache pages to an inode.
+ */
+static inline bool mapping_shrinkable(struct address_space *mapping)
+{
+ void *head;
+
+ /*
+ * On highmem systems, there could be lowmem pressure from the
+ * inodes before there is highmem pressure from the page
+ * cache. Make inodes shrinkable regardless of cache state.
+ */
+ if (IS_ENABLED(CONFIG_HIGHMEM))
+ return true;
+
+ /* Cache completely empty? Shrink away. */
+ head = rcu_access_pointer(mapping->i_pages.xa_head);
+ if (!head)
+ return true;
+
+ /*
+ * The xarray stores single offset-0 entries directly in the
+ * head pointer, which allows non-resident page cache entries
+ * to escape the shadow shrinker's list of xarray nodes. The
+ * inode shrinker needs to pick them up under memory pressure.
+ */
+ if (!xa_is_node(head) && xa_is_value(head))
+ return true;
+
+ return false;
+}
+
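A sketch of the caller pattern described above, with the i_lock held as required; inode_add_lru() is the private helper from fs/inode.c, shown here only to illustrate the locking:

static void maybe_add_inode_lru(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	if (mapping_shrinkable(inode->i_mapping))
		inode_add_lru(inode);	/* queue for the inode shrinker */
	spin_unlock(&inode->i_lock);
}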
+/*
* Bits in mapping->flags.
*/
enum mapping_flags {
@@ -162,149 +212,152 @@ static inline void filemap_nr_thps_dec(struct address_space *mapping)
void release_pages(struct page **pages, int nr);
+struct address_space *page_mapping(struct page *);
+struct address_space *folio_mapping(struct folio *);
+struct address_space *swapcache_mapping(struct folio *);
+
+/**
+ * folio_file_mapping - Find the mapping this folio belongs to.
+ * @folio: The folio.
+ *
+ * For folios which are in the page cache, return the mapping that this
+ * folio belongs to. Folios in the swap cache return the mapping of the
+ * swap file or swap device where the data is stored. This is different
+ * from the mapping returned by folio_mapping(). The only reason to
+ * use it is if, like NFS, you return 0 from ->swap_activate.
+ *
+ * Do not call this for folios which aren't in the page cache or swap cache.
+ */
+static inline struct address_space *folio_file_mapping(struct folio *folio)
+{
+ if (unlikely(folio_test_swapcache(folio)))
+ return swapcache_mapping(folio);
+
+ return folio->mapping;
+}
+
+static inline struct address_space *page_file_mapping(struct page *page)
+{
+ return folio_file_mapping(page_folio(page));
+}
+
/*
* For file cache pages, return the address_space, otherwise return NULL
*/
static inline struct address_space *page_mapping_file(struct page *page)
{
- if (unlikely(PageSwapCache(page)))
+ struct folio *folio = page_folio(page);
+
+ if (unlikely(folio_test_swapcache(folio)))
return NULL;
- return page_mapping(page);
+ return folio_mapping(folio);
}
-/*
- * speculatively take a reference to a page.
- * If the page is free (_refcount == 0), then _refcount is untouched, and 0
- * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
- *
- * This function must be called inside the same rcu_read_lock() section as has
- * been used to lookup the page in the pagecache radix-tree (or page table):
- * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
- *
- * Unless an RCU grace period has passed, the count of all pages coming out
- * of the allocator must be considered unstable. page_count may return higher
- * than expected, and put_page must be able to do the right thing when the
- * page has been finished with, no matter what it is subsequently allocated
- * for (because put_page is what is used here to drop an invalid speculative
- * reference).
- *
- * This is the interesting part of the lockless pagecache (and lockless
- * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
- * has the following pattern:
- * 1. find page in radix tree
- * 2. conditionally increment refcount
- * 3. check the page is still in pagecache (if no, goto 1)
- *
- * Remove-side that cares about stability of _refcount (eg. reclaim) has the
- * following (with the i_pages lock held):
- * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
- * B. remove page from pagecache
- * C. free the page
- *
- * There are 2 critical interleavings that matter:
- * - 2 runs before A: in this case, A sees elevated refcount and bails out
- * - A runs before 2: in this case, 2 sees zero refcount and retries;
- * subsequently, B will complete and 1 will find no page, causing the
- * lookup to return NULL.
+/**
+ * folio_inode - Get the host inode for this folio.
+ * @folio: The folio.
*
- * It is possible that between 1 and 2, the page is removed then the exact same
- * page is inserted into the same position in pagecache. That's OK: the
- * old find_get_page using a lock could equally have run before or after
- * such a re-insertion, depending on order that locks are granted.
+ * For folios which are in the page cache, return the inode that this folio
+ * belongs to.
*
- * Lookups racing against pagecache insertion isn't a big problem: either 1
- * will find the page or it will not. Likewise, the old find_get_page could run
- * either before the insertion or afterwards, depending on timing.
+ * Do not call this for folios which aren't in the page cache.
*/
-static inline int __page_cache_add_speculative(struct page *page, int count)
+static inline struct inode *folio_inode(struct folio *folio)
{
-#ifdef CONFIG_TINY_RCU
-# ifdef CONFIG_PREEMPT_COUNT
- VM_BUG_ON(!in_atomic() && !irqs_disabled());
-# endif
- /*
- * Preempt must be disabled here - we rely on rcu_read_lock doing
- * this for us.
- *
- * Pagecache won't be truncated from interrupt context, so if we have
- * found a page in the radix tree here, we have pinned its refcount by
- * disabling preempt, and hence no need for the "speculative get" that
- * SMP requires.
- */
- VM_BUG_ON_PAGE(page_count(page) == 0, page);
- page_ref_add(page, count);
+ return folio->mapping->host;
+}
-#else
- if (unlikely(!page_ref_add_unless(page, count, 0))) {
- /*
- * Either the page has been freed, or will be freed.
- * In either case, retry here and the caller should
- * do the right thing (see comments above).
- */
- return 0;
- }
-#endif
+static inline bool page_cache_add_speculative(struct page *page, int count)
+{
VM_BUG_ON_PAGE(PageTail(page), page);
-
- return 1;
+ return folio_ref_try_add_rcu((struct folio *)page, count);
}
-static inline int page_cache_get_speculative(struct page *page)
+static inline bool page_cache_get_speculative(struct page *page)
{
- return __page_cache_add_speculative(page, 1);
+ return page_cache_add_speculative(page, 1);
}
-static inline int page_cache_add_speculative(struct page *page, int count)
+/**
+ * folio_attach_private - Attach private data to a folio.
+ * @folio: Folio to attach data to.
+ * @data: Data to attach to folio.
+ *
+ * Attaching private data to a folio increments the page's reference count.
+ * The data must be detached before the folio will be freed.
+ */
+static inline void folio_attach_private(struct folio *folio, void *data)
{
- return __page_cache_add_speculative(page, count);
+ folio_get(folio);
+ folio->private = data;
+ folio_set_private(folio);
}
/**
- * attach_page_private - Attach private data to a page.
- * @page: Page to attach data to.
- * @data: Data to attach to page.
+ * folio_change_private - Change private data on a folio.
+ * @folio: Folio to change the data on.
+ * @data: Data to set on the folio.
*
- * Attaching private data to a page increments the page's reference count.
- * The data must be detached before the page will be freed.
+ * Change the private data attached to a folio and return the old
+ * data. The page must previously have had data attached and the data
+ * must be detached before the folio will be freed.
+ *
+ * Return: Data that was previously attached to the folio.
*/
-static inline void attach_page_private(struct page *page, void *data)
+static inline void *folio_change_private(struct folio *folio, void *data)
{
- get_page(page);
- set_page_private(page, (unsigned long)data);
- SetPagePrivate(page);
+ void *old = folio_get_private(folio);
+
+ folio->private = data;
+ return old;
}
/**
- * detach_page_private - Detach private data from a page.
- * @page: Page to detach data from.
+ * folio_detach_private - Detach private data from a folio.
+ * @folio: Folio to detach data from.
*
- * Removes the data that was previously attached to the page and decrements
+ * Removes the data that was previously attached to the folio and decrements
* the refcount on the page.
*
- * Return: Data that was attached to the page.
+ * Return: Data that was attached to the folio.
*/
-static inline void *detach_page_private(struct page *page)
+static inline void *folio_detach_private(struct folio *folio)
{
- void *data = (void *)page_private(page);
+ void *data = folio_get_private(folio);
- if (!PagePrivate(page))
+ if (!folio_test_private(folio))
return NULL;
- ClearPagePrivate(page);
- set_page_private(page, 0);
- put_page(page);
+ folio_clear_private(folio);
+ folio->private = NULL;
+ folio_put(folio);
return data;
}
+static inline void attach_page_private(struct page *page, void *data)
+{
+ folio_attach_private(page_folio(page), data);
+}
+
+static inline void *detach_page_private(struct page *page)
+{
+ return folio_detach_private(page_folio(page));
+}
+
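A sketch of the attach/detach lifecycle; struct my_fs_data and both helpers are hypothetical filesystem code, not part of this patch:

struct my_fs_data {
	unsigned long state;
};

static void my_fs_init_folio(struct folio *folio, struct my_fs_data *fsd)
{
	folio_attach_private(folio, fsd);	/* takes a folio reference */
}

static void my_fs_release_folio(struct folio *folio)
{
	struct my_fs_data *fsd = folio_detach_private(folio);

	kfree(fsd);	/* folio_detach_private() already dropped the reference */
}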
#ifdef CONFIG_NUMA
-extern struct page *__page_cache_alloc(gfp_t gfp);
+struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order);
#else
-static inline struct page *__page_cache_alloc(gfp_t gfp)
+static inline struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
{
- return alloc_pages(gfp, 0);
+ return folio_alloc(gfp, order);
}
#endif
+static inline struct page *__page_cache_alloc(gfp_t gfp)
+{
+ return &filemap_alloc_folio(gfp, 0)->page;
+}
+
static inline struct page *page_cache_alloc(struct address_space *x)
{
return __page_cache_alloc(mapping_gfp_mask(x));
@@ -331,9 +384,28 @@ pgoff_t page_cache_prev_miss(struct address_space *mapping,
#define FGP_FOR_MMAP 0x00000040
#define FGP_HEAD 0x00000080
#define FGP_ENTRY 0x00000100
+#define FGP_STABLE 0x00000200
-struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
- int fgp_flags, gfp_t cache_gfp_mask);
+struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
+ int fgp_flags, gfp_t gfp);
+struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
+ int fgp_flags, gfp_t gfp);
+
+/**
+ * filemap_get_folio - Find and get a folio.
+ * @mapping: The address_space to search.
+ * @index: The page index.
+ *
+ * Looks up the page cache entry at @mapping & @index. If a folio is
+ * present, it is returned with an increased refcount.
+ *
+ * Otherwise, %NULL is returned.
+ */
+static inline struct folio *filemap_get_folio(struct address_space *mapping,
+ pgoff_t index)
+{
+ return __filemap_get_folio(mapping, index, 0, 0);
+}
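A minimal usage sketch, assuming only that the caller drops the reference the lookup took:

static long report_folio_size(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio = filemap_get_folio(mapping, index);
	long ret;

	if (!folio)
		return -ENOENT;
	ret = folio_size(folio);
	folio_put(folio);	/* drop the lookup reference */
	return ret;
}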
/**
* find_get_page - find and get a page reference
@@ -377,25 +449,6 @@ static inline struct page *find_lock_page(struct address_space *mapping,
}
/**
- * find_lock_head - Locate, pin and lock a pagecache page.
- * @mapping: The address_space to search.
- * @index: The page index.
- *
- * Looks up the page cache entry at @mapping & @index. If there is a
- * page cache page, its head page is returned locked and with an increased
- * refcount.
- *
- * Context: May sleep.
- * Return: A struct page which is !PageTail, or %NULL if there is no page
- * in the cache for this index.
- */
-static inline struct page *find_lock_head(struct address_space *mapping,
- pgoff_t index)
-{
- return pagecache_get_page(mapping, index, FGP_LOCK | FGP_HEAD, 0);
-}
-
-/**
* find_or_create_page - locate or add a pagecache page
* @mapping: the page's address_space
* @index: the page's index into the mapping
@@ -452,6 +505,73 @@ static inline bool thp_contains(struct page *head, pgoff_t index)
return page_index(head) == (index & ~(thp_nr_pages(head) - 1UL));
}
+#define swapcache_index(folio) __page_file_index(&(folio)->page)
+
+/**
+ * folio_index - File index of a folio.
+ * @folio: The folio.
+ *
+ * For a folio which is either in the page cache or the swap cache,
+ * return its index within the address_space it belongs to. If you know
+ * the page is definitely in the page cache, you can look at the folio's
+ * index directly.
+ *
+ * Return: The index (offset in units of pages) of a folio in its file.
+ */
+static inline pgoff_t folio_index(struct folio *folio)
+{
+ if (unlikely(folio_test_swapcache(folio)))
+ return swapcache_index(folio);
+ return folio->index;
+}
+
+/**
+ * folio_next_index - Get the index of the next folio.
+ * @folio: The current folio.
+ *
+ * Return: The index of the folio which follows this folio in the file.
+ */
+static inline pgoff_t folio_next_index(struct folio *folio)
+{
+ return folio->index + folio_nr_pages(folio);
+}
+
+/**
+ * folio_file_page - The page for a particular index.
+ * @folio: The folio which contains this index.
+ * @index: The index we want to look up.
+ *
+ * Sometimes after looking up a folio in the page cache, we need to
+ * obtain the specific page for an index (eg a page fault).
+ *
+ * Return: The page containing the file data for this index.
+ */
+static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
+{
+ /* HugeTLBfs indexes the page cache in units of hpage_size */
+ if (folio_test_hugetlb(folio))
+ return &folio->page;
+ return folio_page(folio, index & (folio_nr_pages(folio) - 1));
+}
+
+/**
+ * folio_contains - Does this folio contain this index?
+ * @folio: The folio.
+ * @index: The page index within the file.
+ *
+ * Context: The caller should have the page locked in order to prevent
+ * (eg) shmem from moving the page between the page cache and swap cache
+ * and changing its index in the middle of the operation.
+ * Return: true or false.
+ */
+static inline bool folio_contains(struct folio *folio, pgoff_t index)
+{
+ /* HugeTLBfs indexes the page cache in units of hpage_size */
+ if (folio_test_hugetlb(folio))
+ return folio->index == index;
+ return index - folio_index(folio) < folio_nr_pages(folio);
+}
+
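A short sketch tying the helpers together; page_for_index() is hypothetical:

static struct page *page_for_index(struct folio *folio, pgoff_t index)
{
	VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
	return folio_file_page(folio, index);
}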
/*
* Given the page we found in the page cache, return the page corresponding
* to this index in the file
@@ -560,6 +680,27 @@ static inline loff_t page_file_offset(struct page *page)
return ((loff_t)page_index(page)) << PAGE_SHIFT;
}
+/**
+ * folio_pos - Returns the byte position of this folio in its file.
+ * @folio: The folio.
+ */
+static inline loff_t folio_pos(struct folio *folio)
+{
+ return page_offset(&folio->page);
+}
+
+/**
+ * folio_file_pos - Returns the byte position of this folio in its file.
+ * @folio: The folio.
+ *
+ * This differs from folio_pos() for folios which belong to a swap file.
+ * NFS is the only filesystem today which needs to use folio_file_pos().
+ */
+static inline loff_t folio_file_pos(struct folio *folio)
+{
+ return page_file_offset(&folio->page);
+}
+
extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
unsigned long address);
@@ -575,13 +716,13 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
}
struct wait_page_key {
- struct page *page;
+ struct folio *folio;
int bit_nr;
int page_match;
};
struct wait_page_queue {
- struct page *page;
+ struct folio *folio;
int bit_nr;
wait_queue_entry_t wait;
};
@@ -589,7 +730,7 @@ struct wait_page_queue {
static inline bool wake_page_match(struct wait_page_queue *wait_page,
struct wait_page_key *key)
{
- if (wait_page->page != key->page)
+ if (wait_page->folio != key->folio)
return false;
key->page_match = 1;
@@ -599,20 +740,31 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page,
return true;
}
-extern void __lock_page(struct page *page);
-extern int __lock_page_killable(struct page *page);
-extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
-extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
+void __folio_lock(struct folio *folio);
+int __folio_lock_killable(struct folio *folio);
+bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
unsigned int flags);
-extern void unlock_page(struct page *page);
+void unlock_page(struct page *page);
+void folio_unlock(struct folio *folio);
+
+static inline bool folio_trylock(struct folio *folio)
+{
+ return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
+}
/*
* Return true if the page was successfully locked
*/
static inline int trylock_page(struct page *page)
{
- page = compound_head(page);
- return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
+ return folio_trylock(page_folio(page));
+}
+
+static inline void folio_lock(struct folio *folio)
+{
+ might_sleep();
+ if (!folio_trylock(folio))
+ __folio_lock(folio);
}
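The usual lock/work/unlock shape, as a sketch; the inner comment marks where real work would go:

static void with_folio_locked(struct folio *folio)
{
	folio_lock(folio);	/* may sleep until the lock is free */
	/* ... inspect or modify the locked folio ... */
	folio_unlock(folio);
}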
/*
@@ -620,38 +772,30 @@ static inline int trylock_page(struct page *page)
*/
static inline void lock_page(struct page *page)
{
+ struct folio *folio;
might_sleep();
- if (!trylock_page(page))
- __lock_page(page);
+
+ folio = page_folio(page);
+ if (!folio_trylock(folio))
+ __folio_lock(folio);
}
-/*
- * lock_page_killable is like lock_page but can be interrupted by fatal
- * signals. It returns 0 if it locked the page and -EINTR if it was
- * killed while waiting.
- */
-static inline int lock_page_killable(struct page *page)
+static inline int folio_lock_killable(struct folio *folio)
{
might_sleep();
- if (!trylock_page(page))
- return __lock_page_killable(page);
+ if (!folio_trylock(folio))
+ return __folio_lock_killable(folio);
return 0;
}
/*
- * lock_page_async - Lock the page, unless this would block. If the page
- * is already locked, then queue a callback when the page becomes unlocked.
- * This callback can then retry the operation.
- *
- * Returns 0 if the page is locked successfully, or -EIOCBQUEUED if the page
- * was already locked and the callback defined in 'wait' was queued.
+ * lock_page_killable is like lock_page but can be interrupted by fatal
+ * signals. It returns 0 if it locked the page and -EINTR if it was
+ * killed while waiting.
*/
-static inline int lock_page_async(struct page *page,
- struct wait_page_queue *wait)
+static inline int lock_page_killable(struct page *page)
{
- if (!trylock_page(page))
- return __lock_page_async(page, wait);
- return 0;
+ return folio_lock_killable(page_folio(page));
}
/*
@@ -659,140 +803,122 @@ static inline int lock_page_async(struct page *page,
* caller indicated that it can handle a retry.
*
* Return value and mmap_lock implications depend on flags; see
- * __lock_page_or_retry().
+ * __folio_lock_or_retry().
*/
-static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
+static inline bool lock_page_or_retry(struct page *page, struct mm_struct *mm,
unsigned int flags)
{
+ struct folio *folio;
might_sleep();
- return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
+
+ folio = page_folio(page);
+ return folio_trylock(folio) || __folio_lock_or_retry(folio, mm, flags);
}
/*
- * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
+ * This is exported only for folio_wait_locked/folio_wait_writeback, etc.,
* and should not be used directly.
*/
-extern void wait_on_page_bit(struct page *page, int bit_nr);
-extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
+void folio_wait_bit(struct folio *folio, int bit_nr);
+int folio_wait_bit_killable(struct folio *folio, int bit_nr);
/*
- * Wait for a page to be unlocked.
+ * Wait for a folio to be unlocked.
*
- * This must be called with the caller "holding" the page,
- * ie with increased "page->count" so that the page won't
+ * This must be called with the caller "holding" the folio,
+ * ie with an increased reference count so that the folio won't
* go away during the wait..
*/
+static inline void folio_wait_locked(struct folio *folio)
+{
+ if (folio_test_locked(folio))
+ folio_wait_bit(folio, PG_locked);
+}
+
+static inline int folio_wait_locked_killable(struct folio *folio)
+{
+ if (!folio_test_locked(folio))
+ return 0;
+ return folio_wait_bit_killable(folio, PG_locked);
+}
+
static inline void wait_on_page_locked(struct page *page)
{
- if (PageLocked(page))
- wait_on_page_bit(compound_head(page), PG_locked);
+ folio_wait_locked(page_folio(page));
}
static inline int wait_on_page_locked_killable(struct page *page)
{
- if (!PageLocked(page))
- return 0;
- return wait_on_page_bit_killable(compound_head(page), PG_locked);
+ return folio_wait_locked_killable(page_folio(page));
}
int put_and_wait_on_page_locked(struct page *page, int state);
void wait_on_page_writeback(struct page *page);
-int wait_on_page_writeback_killable(struct page *page);
-extern void end_page_writeback(struct page *page);
+void folio_wait_writeback(struct folio *folio);
+int folio_wait_writeback_killable(struct folio *folio);
+void end_page_writeback(struct page *page);
+void folio_end_writeback(struct folio *folio);
void wait_for_stable_page(struct page *page);
+void folio_wait_stable(struct folio *folio);
+void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
+static inline void __set_page_dirty(struct page *page,
+ struct address_space *mapping, int warn)
+{
+ __folio_mark_dirty(page_folio(page), mapping, warn);
+}
+void folio_account_cleaned(struct folio *folio, struct address_space *mapping,
+ struct bdi_writeback *wb);
+static inline void account_page_cleaned(struct page *page,
+ struct address_space *mapping, struct bdi_writeback *wb)
+{
+ return folio_account_cleaned(page_folio(page), mapping, wb);
+}
+void __folio_cancel_dirty(struct folio *folio);
+static inline void folio_cancel_dirty(struct folio *folio)
+{
+ /* Avoid atomic ops, locking, etc. when not actually needed. */
+ if (folio_test_dirty(folio))
+ __folio_cancel_dirty(folio);
+}
+static inline void cancel_dirty_page(struct page *page)
+{
+ folio_cancel_dirty(page_folio(page));
+}
+bool folio_clear_dirty_for_io(struct folio *folio);
+bool clear_page_dirty_for_io(struct page *page);
+int __must_check folio_write_one(struct folio *folio);
+static inline int __must_check write_one_page(struct page *page)
+{
+ return folio_write_one(page_folio(page));
+}
-void __set_page_dirty(struct page *, struct address_space *, int warn);
int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
void page_endio(struct page *page, bool is_write, int err);
-/**
- * set_page_private_2 - Set PG_private_2 on a page and take a ref
- * @page: The page.
- *
- * Set the PG_private_2 flag on a page and take the reference needed for the VM
- * to handle its lifetime correctly. This sets the flag and takes the
- * reference unconditionally, so care must be taken not to set the flag again
- * if it's already set.
- */
-static inline void set_page_private_2(struct page *page)
-{
- page = compound_head(page);
- get_page(page);
- SetPagePrivate2(page);
-}
-
-void end_page_private_2(struct page *page);
-void wait_on_page_private_2(struct page *page);
-int wait_on_page_private_2_killable(struct page *page);
+void folio_end_private_2(struct folio *folio);
+void folio_wait_private_2(struct folio *folio);
+int folio_wait_private_2_killable(struct folio *folio);
/*
* Add an arbitrary waiter to a page's wait queue
*/
-extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);
+void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter);
/*
- * Fault everything in given userspace address range in.
+ * Fault in userspace address range.
*/
-static inline int fault_in_pages_writeable(char __user *uaddr, size_t size)
-{
- char __user *end = uaddr + size - 1;
-
- if (unlikely(size == 0))
- return 0;
-
- if (unlikely(uaddr > end))
- return -EFAULT;
- /*
- * Writing zeroes into userspace here is OK, because we know that if
- * the zero gets there, we'll be overwriting it.
- */
- do {
- if (unlikely(__put_user(0, uaddr) != 0))
- return -EFAULT;
- uaddr += PAGE_SIZE;
- } while (uaddr <= end);
-
- /* Check whether the range spilled into the next page. */
- if (((unsigned long)uaddr & PAGE_MASK) ==
- ((unsigned long)end & PAGE_MASK))
- return __put_user(0, end);
-
- return 0;
-}
-
-static inline int fault_in_pages_readable(const char __user *uaddr, size_t size)
-{
- volatile char c;
- const char __user *end = uaddr + size - 1;
-
- if (unlikely(size == 0))
- return 0;
-
- if (unlikely(uaddr > end))
- return -EFAULT;
-
- do {
- if (unlikely(__get_user(c, uaddr) != 0))
- return -EFAULT;
- uaddr += PAGE_SIZE;
- } while (uaddr <= end);
-
- /* Check whether the range spilled into the next page. */
- if (((unsigned long)uaddr & PAGE_MASK) ==
- ((unsigned long)end & PAGE_MASK)) {
- return __get_user(c, end);
- }
-
- (void)c;
- return 0;
-}
+size_t fault_in_writeable(char __user *uaddr, size_t size);
+size_t fault_in_safe_writeable(const char __user *uaddr, size_t size);
+size_t fault_in_readable(const char __user *uaddr, size_t size);
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
- pgoff_t index, gfp_t gfp_mask);
+ pgoff_t index, gfp_t gfp);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
- pgoff_t index, gfp_t gfp_mask);
+ pgoff_t index, gfp_t gfp);
+int filemap_add_folio(struct address_space *mapping, struct folio *folio,
+ pgoff_t index, gfp_t gfp);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
void replace_page_cache_page(struct page *old, struct page *new);
@@ -817,6 +943,10 @@ static inline int add_to_page_cache(struct page *page,
return error;
}
+/* Must be non-static for BPF error injection */
+int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
+ pgoff_t index, gfp_t gfp, void **shadowp);
+
/**
* struct readahead_control - Describes a readahead request.
*
@@ -906,33 +1036,57 @@ void page_cache_async_readahead(struct address_space *mapping,
page_cache_async_ra(&ractl, page, req_count);
}
+static inline struct folio *__readahead_folio(struct readahead_control *ractl)
+{
+ struct folio *folio;
+
+ BUG_ON(ractl->_batch_count > ractl->_nr_pages);
+ ractl->_nr_pages -= ractl->_batch_count;
+ ractl->_index += ractl->_batch_count;
+
+ if (!ractl->_nr_pages) {
+ ractl->_batch_count = 0;
+ return NULL;
+ }
+
+ folio = xa_load(&ractl->mapping->i_pages, ractl->_index);
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+ ractl->_batch_count = folio_nr_pages(folio);
+
+ return folio;
+}
+
/**
* readahead_page - Get the next page to read.
- * @rac: The current readahead request.
+ * @ractl: The current readahead request.
*
* Context: The page is locked and has an elevated refcount. The caller
* should decrease the refcount once the page has been submitted for I/O
* and unlock the page once all I/O to that page has completed.
* Return: A pointer to the next page, or %NULL if we are done.
*/
-static inline struct page *readahead_page(struct readahead_control *rac)
+static inline struct page *readahead_page(struct readahead_control *ractl)
{
- struct page *page;
-
- BUG_ON(rac->_batch_count > rac->_nr_pages);
- rac->_nr_pages -= rac->_batch_count;
- rac->_index += rac->_batch_count;
+ struct folio *folio = __readahead_folio(ractl);
- if (!rac->_nr_pages) {
- rac->_batch_count = 0;
- return NULL;
- }
+ return &folio->page;
+}
- page = xa_load(&rac->mapping->i_pages, rac->_index);
- VM_BUG_ON_PAGE(!PageLocked(page), page);
- rac->_batch_count = thp_nr_pages(page);
+/**
+ * readahead_folio - Get the next folio to read.
+ * @ractl: The current readahead request.
+ *
+ * Context: The folio is locked. The caller should unlock the folio once
+ * all I/O to that folio has completed.
+ * Return: A pointer to the next folio, or %NULL if we are done.
+ */
+static inline struct folio *readahead_folio(struct readahead_control *ractl)
+{
+ struct folio *folio = __readahead_folio(ractl);
- return page;
+ if (folio)
+ folio_put(folio);
+ return folio;
}
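A sketch of a filesystem ->readahead() loop built on readahead_folio(); my_fs_read_folio_async() is a hypothetical I/O submission helper that unlocks the folio when the read completes:

static void my_fs_readahead(struct readahead_control *ractl)
{
	struct folio *folio;

	while ((folio = readahead_folio(ractl)) != NULL)
		my_fs_read_folio_async(folio);
}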
static inline unsigned int __readahead_batch(struct readahead_control *rac,
@@ -1040,6 +1194,34 @@ static inline unsigned long dir_pages(struct inode *inode)
}
/**
+ * folio_mkwrite_check_truncate - check if folio was truncated
+ * @folio: the folio to check
+ * @inode: the inode to check the folio against
+ *
+ * Return: the number of bytes in the folio up to EOF,
+ * or -EFAULT if the folio was truncated.
+ */
+static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
+ struct inode *inode)
+{
+ loff_t size = i_size_read(inode);
+ pgoff_t index = size >> PAGE_SHIFT;
+ size_t offset = offset_in_folio(folio, size);
+
+ if (!folio->mapping)
+ return -EFAULT;
+
+ /* folio is wholly inside EOF */
+ if (folio_next_index(folio) - 1 < index)
+ return folio_size(folio);
+ /* folio is wholly past EOF */
+ if (folio->index > index || !offset)
+ return -EFAULT;
+ /* folio is partially inside EOF */
+ return offset;
+}
+
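A hedged sketch of a ->page_mkwrite() handler using the helper above; error handling is pared down to the truncation check:

static vm_fault_t my_fs_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	struct inode *inode = file_inode(vmf->vma->vm_file);
	ssize_t len;

	folio_lock(folio);
	len = folio_mkwrite_check_truncate(folio, inode);
	if (len < 0) {
		folio_unlock(folio);
		return VM_FAULT_NOPAGE;	/* truncated under us */
	}
	/* ... dirty the first len bytes ... */
	return VM_FAULT_LOCKED;		/* folio stays locked for the caller */
}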
+/**
* page_mkwrite_check_truncate - check if page was truncated
* @page: the page to check
* @inode: the inode to check the page against
@@ -1068,19 +1250,25 @@ static inline int page_mkwrite_check_truncate(struct page *page,
}
/**
- * i_blocks_per_page - How many blocks fit in this page.
+ * i_blocks_per_folio - How many blocks fit in this folio.
* @inode: The inode which contains the blocks.
- * @page: The page (head page if the page is a THP).
+ * @folio: The folio.
*
- * If the block size is larger than the size of this page, return zero.
+ * If the block size is larger than the size of this folio, return zero.
*
- * Context: The caller should hold a refcount on the page to prevent it
+ * Context: The caller should hold a refcount on the folio to prevent it
* from being split.
- * Return: The number of filesystem blocks covered by this page.
+ * Return: The number of filesystem blocks covered by this folio.
*/
static inline
+unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
+{
+ return folio_size(folio) >> inode->i_blkbits;
+}
+
+static inline
unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
{
- return thp_size(page) >> inode->i_blkbits;
+ return i_blocks_per_folio(inode, page_folio(page));
}
#endif /* _LINUX_PAGEMAP_H */
diff --git a/include/linux/part_stat.h b/include/linux/part_stat.h
index d2558121d48c..6f7949b2fd8d 100644
--- a/include/linux/part_stat.h
+++ b/include/linux/part_stat.h
@@ -3,6 +3,7 @@
#define _LINUX_PART_STAT_H
#include <linux/genhd.h>
+#include <asm/local.h>
struct disk_stats {
u64 nsecs[NR_STAT_GROUPS];
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index f16de399d2de..078225b514d4 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -84,6 +84,14 @@ extern struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root,
void acpi_pci_add_bus(struct pci_bus *bus);
void acpi_pci_remove_bus(struct pci_bus *bus);
+#ifdef CONFIG_PCI
+void pci_acpi_setup(struct device *dev, struct acpi_device *adev);
+void pci_acpi_cleanup(struct device *dev, struct acpi_device *adev);
+#else
+static inline void pci_acpi_setup(struct device *dev, struct acpi_device *adev) {}
+static inline void pci_acpi_cleanup(struct device *dev, struct acpi_device *adev) {}
+#endif
+
#ifdef CONFIG_ACPI_PCI_SLOT
void acpi_pci_slot_init(void);
void acpi_pci_slot_enumerate(struct pci_bus *bus);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index cd8aa6fce204..18a75c8e615c 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -233,6 +233,8 @@ enum pci_dev_flags {
PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
/* Don't use Relaxed Ordering for TLPs directed at this device */
PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
+ /* Device does honor MSI masking despite saying otherwise */
+ PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
};
enum pci_irq_reroute_variant {
@@ -900,7 +902,10 @@ struct pci_driver {
struct pci_dynids dynids;
};
-#define to_pci_driver(drv) container_of(drv, struct pci_driver, driver)
+static inline struct pci_driver *to_pci_driver(struct device_driver *drv)
+{
+ return drv ? container_of(drv, struct pci_driver, driver) : NULL;
+}
/**
* PCI_DEVICE - macro used to describe a specific PCI device
@@ -1130,6 +1135,7 @@ u16 pci_find_ext_capability(struct pci_dev *dev, int cap);
u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap);
struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap);
+u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec);
u64 pci_get_dsn(struct pci_dev *dev);
@@ -1350,6 +1356,8 @@ void pci_unlock_rescan_remove(void);
/* Vital Product Data routines */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
+ssize_t pci_read_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
+ssize_t pci_write_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
@@ -1498,19 +1506,8 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode,
#define PCI_IRQ_ALL_TYPES \
(PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX)
-/* kmem_cache style wrapper around pci_alloc_consistent() */
-
#include <linux/dmapool.h>
-#define pci_pool dma_pool
-#define pci_pool_create(name, pdev, size, align, allocation) \
- dma_pool_create(name, &pdev->dev, size, align, allocation)
-#define pci_pool_destroy(pool) dma_pool_destroy(pool)
-#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
-#define pci_pool_zalloc(pool, flags, handle) \
- dma_pool_zalloc(pool, flags, handle)
-#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
-
struct msix_entry {
u32 vector; /* Kernel uses to write allocated vector */
u16 entry; /* Driver uses to specify entry, OS writes */
@@ -1671,6 +1668,7 @@ void pci_cfg_access_lock(struct pci_dev *dev);
bool pci_cfg_access_trylock(struct pci_dev *dev);
void pci_cfg_access_unlock(struct pci_dev *dev);
+void pci_dev_lock(struct pci_dev *dev);
int pci_dev_trylock(struct pci_dev *dev);
void pci_dev_unlock(struct pci_dev *dev);
@@ -2126,7 +2124,7 @@ void pcibios_disable_device(struct pci_dev *dev);
void pcibios_set_master(struct pci_dev *dev);
int pcibios_set_pcie_reset_state(struct pci_dev *dev,
enum pcie_reset_state state);
-int pcibios_add_device(struct pci_dev *dev);
+int pcibios_device_add(struct pci_dev *dev);
void pcibios_release_device(struct pci_dev *dev);
#ifdef CONFIG_PCI
void pcibios_penalize_isa_irq(int irq, int active);
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index ae16a9856305..b31d3f3312ce 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -267,6 +267,28 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
}
/**
+ * percpu_ref_tryget_live_rcu - same as percpu_ref_tryget_live() but the
+ * caller is responsible for taking RCU.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline bool percpu_ref_tryget_live_rcu(struct percpu_ref *ref)
+{
+ unsigned long __percpu *percpu_count;
+ bool ret = false;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ if (likely(__ref_is_percpu(ref, &percpu_count))) {
+ this_cpu_inc(*percpu_count);
+ ret = true;
+ } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
+ ret = atomic_long_inc_not_zero(&ref->data->count);
+ }
+ return ret;
+}
+
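A sketch of the intended use: batching several gets under one RCU read-side critical section rather than paying rcu_read_lock() per reference:

static bool tryget_pair(struct percpu_ref *a, struct percpu_ref *b)
{
	bool ok;

	rcu_read_lock();
	ok = percpu_ref_tryget_live_rcu(a);
	if (ok && !percpu_ref_tryget_live_rcu(b)) {
		percpu_ref_put(a);
		ok = false;
	}
	rcu_read_unlock();
	return ok;
}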
+/**
* percpu_ref_tryget_live - try to increment a live percpu refcount
* @ref: percpu_ref to try-get
*
@@ -283,20 +305,11 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
*/
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
- unsigned long __percpu *percpu_count;
bool ret = false;
rcu_read_lock();
-
- if (__ref_is_percpu(ref, &percpu_count)) {
- this_cpu_inc(*percpu_count);
- ret = true;
- } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
- ret = atomic_long_inc_not_zero(&ref->data->count);
- }
-
+ ret = percpu_ref_tryget_live_rcu(ref);
rcu_read_unlock();
-
return ret;
}
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 5e76af742c80..98a9371133f8 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -123,7 +123,7 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
pcpu_fc_populate_pte_fn_t populate_pte_fn);
#endif
-extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
+extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align) __alloc_size(1);
extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
extern bool is_kernel_percpu_address(unsigned long addr);
@@ -131,8 +131,8 @@ extern bool is_kernel_percpu_address(unsigned long addr);
extern void __init setup_per_cpu_areas(void);
#endif
-extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
-extern void __percpu *__alloc_percpu(size_t size, size_t align);
+extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __alloc_size(1);
+extern void __percpu *__alloc_percpu(size_t size, size_t align) __alloc_size(1);
extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 9b60bb89d86a..0dcfd265beed 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -57,6 +57,7 @@ struct perf_guest_info_callbacks {
#include <linux/cgroup.h>
#include <linux/refcount.h>
#include <linux/security.h>
+#include <linux/static_call.h>
#include <asm/local.h>
struct perf_callchain_entry {
@@ -1400,6 +1401,7 @@ perf_event_addr_filters(struct perf_event *event)
}
extern void perf_event_addr_filters_sync(struct perf_event *event);
+extern void perf_report_aux_output_id(struct perf_event *event, u64 hw_id);
extern int perf_output_begin(struct perf_output_handle *handle,
struct perf_sample_data *data,
@@ -1614,4 +1616,26 @@ extern void __weak arch_perf_update_userpage(struct perf_event *event,
extern __weak u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr);
#endif
+/*
+ * Snapshot branch stack on software events.
+ *
+ * Branch stack can be very useful in understanding software events. For
+ * example, when a long function, e.g. sys_perf_event_open, returns an
+ * errno, it is not obvious why the function failed. Branch stack could
+ * provide very helpful information in this type of scenario.
+ *
+ * On a software event, it is necessary to stop the hardware branch recorder
+ * quickly. Otherwise, the hardware register/buffer will be flushed with
+ * entries of the triggering event. Therefore, a static call is used to
+ * stop the hardware recorder.
+ */
+
+/*
+ * @cnt is the number of entries allocated for @entries.
+ * Return the number of entries copied to @entries.
+ */
+typedef int (perf_snapshot_branch_stack_t)(struct perf_branch_entry *entries,
+ unsigned int cnt);
+DECLARE_STATIC_CALL(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t);
+
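A sketch of the consumer side; it assumes a PMU driver has already updated the static call to point at its recorder-stopping snapshot routine:

static int snapshot_branches(struct perf_branch_entry *entries,
			     unsigned int cnt)
{
	return static_call(perf_snapshot_branch_stack)(entries, cnt);
}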
#endif /* _LINUX_PERF_EVENT_H */
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 736e1d1a47c4..96e43fbb2dd8 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -155,6 +155,40 @@ typedef enum {
PHY_INTERFACE_MODE_MAX,
} phy_interface_t;
+/* PHY interface mode bitmap handling */
+#define DECLARE_PHY_INTERFACE_MASK(name) \
+ DECLARE_BITMAP(name, PHY_INTERFACE_MODE_MAX)
+
+static inline void phy_interface_zero(unsigned long *intf)
+{
+ bitmap_zero(intf, PHY_INTERFACE_MODE_MAX);
+}
+
+static inline bool phy_interface_empty(const unsigned long *intf)
+{
+ return bitmap_empty(intf, PHY_INTERFACE_MODE_MAX);
+}
+
+static inline void phy_interface_and(unsigned long *dst, const unsigned long *a,
+ const unsigned long *b)
+{
+ bitmap_and(dst, a, b, PHY_INTERFACE_MODE_MAX);
+}
+
+static inline void phy_interface_or(unsigned long *dst, const unsigned long *a,
+ const unsigned long *b)
+{
+ bitmap_or(dst, a, b, PHY_INTERFACE_MODE_MAX);
+}
+
+static inline void phy_interface_set_rgmii(unsigned long *intf)
+{
+ __set_bit(PHY_INTERFACE_MODE_RGMII, intf);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_ID, intf);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_RXID, intf);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_TXID, intf);
+}
+
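A sketch of a MAC driver filling in its mask, assuming the phylink_config::supported_interfaces member added later in this series:

static void my_mac_fill_interfaces(struct phylink_config *config)
{
	phy_interface_zero(config->supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_SGMII, config->supported_interfaces);
	phy_interface_set_rgmii(config->supported_interfaces);	/* all four RGMII modes */
}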
/*
* phy_supported_speeds - return all speeds currently supported by a PHY device
*/
@@ -1584,6 +1618,7 @@ int genphy_c45_config_aneg(struct phy_device *phydev);
int genphy_c45_loopback(struct phy_device *phydev, bool enable);
int genphy_c45_pma_resume(struct phy_device *phydev);
int genphy_c45_pma_suspend(struct phy_device *phydev);
+int genphy_c45_fast_retrain(struct phy_device *phydev, bool enable);
/* Generic C45 PHY driver */
extern struct phy_driver genphy_c45_driver;
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 237291196ce2..f037470b6fb3 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -67,6 +67,8 @@ enum phylink_op_type {
* @ovr_an_inband: if true, override PCS to MLO_AN_INBAND
* @get_fixed_state: callback to execute to determine the fixed link state,
* if MAC link is at %MLO_AN_FIXED mode.
+ * @supported_interfaces: bitmap describing which PHY_INTERFACE_MODE_xxx
+ * are supported by the MAC/PCS.
*/
struct phylink_config {
struct device *dev;
@@ -76,6 +78,7 @@ struct phylink_config {
bool ovr_an_inband;
void (*get_fixed_state)(struct phylink_config *config,
struct phylink_link_state *state);
+ DECLARE_PHY_INTERFACE_MASK(supported_interfaces);
};
/**
@@ -133,8 +136,14 @@ struct phylink_mac_ops {
* based on @state->advertising and/or @state->speed and update
* @state->interface accordingly. See phylink_helper_basex_speed().
*
- * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink expects the
- * MAC driver to return all supported link modes.
+ * When @config->supported_interfaces has been set, phylink will iterate
+ * over the supported interfaces to determine the full capability of the
+ * MAC. The validation function must not print errors if @state->interface
+ * is set to an unexpected value.
+ *
+ * When @config->supported_interfaces is empty, phylink will call this
+ * function with @state->interface set to %PHY_INTERFACE_MODE_NA, and
+ * expects the MAC driver to return all supported link modes.
*
* If the @state->interface mode is not supported, then the @supported
* mask must be cleared.
@@ -484,6 +493,7 @@ int phylink_speed_up(struct phylink *pl);
#define phylink_test(bm, mode) __phylink_do_bit(test_bit, bm, mode)
void phylink_set_port_modes(unsigned long *bits);
+void phylink_set_10g_modes(unsigned long *mask);
void phylink_helper_basex_speed(struct phylink_link_state *state);
void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
diff --git a/include/linux/pid.h b/include/linux/pid.h
index af308e15f174..343abf22092e 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -78,6 +78,7 @@ struct file;
extern struct pid *pidfd_pid(const struct file *file);
struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags);
+struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags);
int pidfd_create(struct pid *pid, unsigned int flags);
static inline struct pid *get_pid(struct pid *pid)
diff --git a/include/linux/platform_data/brcmfmac.h b/include/linux/platform_data/brcmfmac.h
index 1d30bf278231..2b5676ff35be 100644
--- a/include/linux/platform_data/brcmfmac.h
+++ b/include/linux/platform_data/brcmfmac.h
@@ -125,7 +125,7 @@ struct brcmfmac_pd_cc_entry {
*/
struct brcmfmac_pd_cc {
int table_size;
- struct brcmfmac_pd_cc_entry table[0];
+ struct brcmfmac_pd_cc_entry table[];
};
/**
diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h
index 02599687770c..df3c78c92ca2 100644
--- a/include/linux/platform_data/cros_ec_proto.h
+++ b/include/linux/platform_data/cros_ec_proto.h
@@ -205,7 +205,7 @@ struct cros_ec_dev {
struct cros_ec_debugfs *debug_info;
bool has_kb_wake_angle;
u16 cmd_offset;
- u32 features[2];
+ struct ec_response_get_features features;
};
#define to_cros_ec_dev(dev) container_of(dev, struct cros_ec_dev, class_dev)
@@ -227,10 +227,13 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev);
-int cros_ec_check_features(struct cros_ec_dev *ec, int feature);
+bool cros_ec_check_features(struct cros_ec_dev *ec, int feature);
int cros_ec_get_sensor_count(struct cros_ec_dev *ec);
+int cros_ec_command(struct cros_ec_device *ec_dev, unsigned int version, int command, void *outdata,
+ int outsize, void *indata, int insize);
+
/**
* cros_ec_get_time_ns() - Return time in ns.
*
diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h
index 101333fe2b8d..40185f9d7c14 100644
--- a/include/linux/platform_data/mlxreg.h
+++ b/include/linux/platform_data/mlxreg.h
@@ -25,12 +25,75 @@ enum mlxreg_wdt_type {
};
/**
+ * enum mlxreg_hotplug_kind - kind of hotplug entry
+ *
+ * @MLXREG_HOTPLUG_DEVICE_NA: do not care;
+ * @MLXREG_HOTPLUG_LC_PRESENT: entry for line card presence in/out events;
+ * @MLXREG_HOTPLUG_LC_VERIFIED: entry for line card verification status events
+ * coming after line card security signature validation;
+ * @MLXREG_HOTPLUG_LC_POWERED: entry for line card power on/off events;
+ * @MLXREG_HOTPLUG_LC_SYNCED: entry for line card synchronization events, coming
+ * after hardware-firmware synchronization handshake;
+ * @MLXREG_HOTPLUG_LC_READY: entry for line card ready events, indicating line card
+ * PHYs ready / unready state;
+ * @MLXREG_HOTPLUG_LC_ACTIVE: entry for line card active events, indicating firmware
+ * availability / unavailability for the ports on line card;
+ * @MLXREG_HOTPLUG_LC_THERMAL: entry for line card thermal shutdown events, positive
+ * event indicates that the system should power off the line
+ * card for which this event has been received;
+ */
+enum mlxreg_hotplug_kind {
+ MLXREG_HOTPLUG_DEVICE_NA = 0,
+ MLXREG_HOTPLUG_LC_PRESENT = 1,
+ MLXREG_HOTPLUG_LC_VERIFIED = 2,
+ MLXREG_HOTPLUG_LC_POWERED = 3,
+ MLXREG_HOTPLUG_LC_SYNCED = 4,
+ MLXREG_HOTPLUG_LC_READY = 5,
+ MLXREG_HOTPLUG_LC_ACTIVE = 6,
+ MLXREG_HOTPLUG_LC_THERMAL = 7,
+};
+
+/**
+ * enum mlxreg_hotplug_device_action - hotplug device action required for
+ * driver's connectivity
+ *
+ * @MLXREG_HOTPLUG_DEVICE_DEFAULT_ACTION: probe device for 'on' event, remove
+ * for 'off' event;
+ * @MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION: probe platform device for 'on'
+ * event, remove for 'off' event;
+ * @MLXREG_HOTPLUG_DEVICE_NO_ACTION: no connectivity action is required;
+ */
+enum mlxreg_hotplug_device_action {
+ MLXREG_HOTPLUG_DEVICE_DEFAULT_ACTION = 0,
+ MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION = 1,
+ MLXREG_HOTPLUG_DEVICE_NO_ACTION = 2,
+};
+
+/**
+ * struct mlxreg_core_hotplug_notifier - hotplug notifier block:
+ *
+ * @identity: notifier identity name;
+ * @handle: user handle to be passed by user handler function;
+ * @user_handler: user handler function associated with the event;
+ */
+struct mlxreg_core_hotplug_notifier {
+ char identity[MLXREG_CORE_LABEL_MAX_SIZE];
+ void *handle;
+ int (*user_handler)(void *handle, enum mlxreg_hotplug_kind kind, u8 action);
+};
+
+/**
* struct mlxreg_hotplug_device - I2C device data:
*
* @adapter: I2C device adapter;
* @client: I2C device client;
* @brdinfo: device board information;
* @nr: I2C device adapter number, to which device is to be attached;
+ * @pdev: platform device, if device is instantiated as a platform device;
+ * @action: action to be performed upon event receiving;
+ * @handle: user handle to be passed by user handler function;
+ * @user_handler: user handler function associated with the event;
+ * @notifier: pointer to event notifier block;
*
* Structure represents I2C hotplug device static data (board topology) and
* dynamic data (related kernel objects handles).
@@ -40,6 +103,11 @@ struct mlxreg_hotplug_device {
struct i2c_client *client;
struct i2c_board_info *brdinfo;
int nr;
+ struct platform_device *pdev;
+ enum mlxreg_hotplug_device_action action;
+ void *handle;
+ int (*user_handler)(void *handle, enum mlxreg_hotplug_kind kind, u8 action);
+ struct mlxreg_core_hotplug_notifier *notifier;
};
/**
@@ -51,12 +119,18 @@ struct mlxreg_hotplug_device {
* @bit: attribute effective bit;
* @capability: attribute capability register;
* @reg_prsnt: attribute presence register;
+ * @reg_sync: attribute sync register;
+ * @reg_pwr: attribute power register;
+ * @reg_ena: attribute enable register;
* @mode: access mode;
* @np - pointer to node platform associated with attribute;
* @hpdev - hotplug device data;
+ * @notifier: pointer to event notifier block;
* @health_cntr: dynamic device health indication counter;
* @attached: true if device has been attached after good health indication;
* @regnum: number of registers occupied by multi-register attribute;
+ * @slot: slot number, at which device is located;
+ * @secured: if set indicates that entry access is secured;
*/
struct mlxreg_core_data {
char label[MLXREG_CORE_LABEL_MAX_SIZE];
@@ -65,18 +139,25 @@ struct mlxreg_core_data {
u32 bit;
u32 capability;
u32 reg_prsnt;
+ u32 reg_sync;
+ u32 reg_pwr;
+ u32 reg_ena;
umode_t mode;
struct device_node *np;
struct mlxreg_hotplug_device hpdev;
+ struct mlxreg_core_hotplug_notifier *notifier;
u32 health_cntr;
bool attached;
u8 regnum;
+ u8 slot;
+ u8 secured;
};
/**
* struct mlxreg_core_item - same type components controlled by the driver:
*
* @data: component data;
+ * @kind: kind of hotplug attribute;
* @aggr_mask: group aggregation mask;
* @reg: group interrupt status register;
* @mask: group interrupt mask;
@@ -89,6 +170,7 @@ struct mlxreg_core_data {
*/
struct mlxreg_core_item {
struct mlxreg_core_data *data;
+ enum mlxreg_hotplug_kind kind;
u32 aggr_mask;
u32 reg;
u32 mask;
diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
index 9837fb011f2f..eb556f988d57 100644
--- a/include/linux/platform_data/ti-sysc.h
+++ b/include/linux/platform_data/ti-sysc.h
@@ -50,6 +50,9 @@ struct sysc_regbits {
s8 emufree_shift;
};
+#define SYSC_MODULE_QUIRK_OTG BIT(30)
+#define SYSC_QUIRK_RESET_ON_CTX_LOST BIT(29)
+#define SYSC_QUIRK_REINIT_ON_CTX_LOST BIT(28)
#define SYSC_QUIRK_REINIT_ON_RESUME BIT(27)
#define SYSC_QUIRK_GPMC_DEBUG BIT(26)
#define SYSC_MODULE_QUIRK_ENA_RESETDONE BIT(25)
diff --git a/include/linux/platform_data/ux500_wdt.h b/include/linux/platform_data/ux500_wdt.h
deleted file mode 100644
index de6a4ad41e76..000000000000
--- a/include/linux/platform_data/ux500_wdt.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST Ericsson SA 2011
- *
- * STE Ux500 Watchdog platform data
- */
-#ifndef __UX500_WDT_H
-#define __UX500_WDT_H
-
-/**
- * struct ux500_wdt_data
- */
-struct ux500_wdt_data {
- unsigned int timeout;
- bool has_28_bits_resolution;
-};
-
-#endif /* __UX500_WDT_H */
diff --git a/include/linux/platform_data/x86/soc.h b/include/linux/platform_data/x86/soc.h
new file mode 100644
index 000000000000..da05f425587a
--- /dev/null
+++ b/include/linux/platform_data/x86/soc.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Helpers for Intel SoC model detection
+ *
+ * Copyright (c) 2019, Intel Corporation.
+ */
+
+#ifndef __PLATFORM_DATA_X86_SOC_H
+#define __PLATFORM_DATA_X86_SOC_H
+
+#if IS_ENABLED(CONFIG_X86)
+
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+#define SOC_INTEL_IS_CPU(soc, type) \
+static inline bool soc_intel_is_##soc(void) \
+{ \
+ static const struct x86_cpu_id soc##_cpu_ids[] = { \
+ X86_MATCH_INTEL_FAM6_MODEL(type, NULL), \
+ {} \
+ }; \
+ const struct x86_cpu_id *id; \
+ \
+ id = x86_match_cpu(soc##_cpu_ids); \
+ if (id) \
+ return true; \
+ return false; \
+}
+
+SOC_INTEL_IS_CPU(byt, ATOM_SILVERMONT);
+SOC_INTEL_IS_CPU(cht, ATOM_AIRMONT);
+SOC_INTEL_IS_CPU(apl, ATOM_GOLDMONT);
+SOC_INTEL_IS_CPU(glk, ATOM_GOLDMONT_PLUS);
+SOC_INTEL_IS_CPU(cml, KABYLAKE_L);
+
+#else /* IS_ENABLED(CONFIG_X86) */
+
+static inline bool soc_intel_is_byt(void)
+{
+ return false;
+}
+
+static inline bool soc_intel_is_cht(void)
+{
+ return false;
+}
+
+static inline bool soc_intel_is_apl(void)
+{
+ return false;
+}
+
+static inline bool soc_intel_is_glk(void)
+{
+ return false;
+}
+
+static inline bool soc_intel_is_cml(void)
+{
+ return false;
+}
+#endif /* IS_ENABLED(CONFIG_X86) */
+
+#endif /* __PLATFORM_DATA_X86_SOC_H */
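A usage sketch (not from the patch): because the !CONFIG_X86 stubs compile to
plain "return false", callers can test the SoC unconditionally, with no #ifdef
at the call site. The quirk helper below is invented for illustration:

#include <linux/platform_data/x86/soc.h>
#include <linux/printk.h>

static void apply_soc_quirks(void)
{
	if (soc_intel_is_byt() || soc_intel_is_cht())
		pr_info("applying Bay Trail/Cherry Trail quirk\n");
}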
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 66bab1bca35c..0f352c1d3c80 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -73,8 +73,11 @@
#ifndef _LINUX_PLIST_H_
#define _LINUX_PLIST_H_
-#include <linux/kernel.h>
+#include <linux/container_of.h>
#include <linux/list.h>
+#include <linux/types.h>
+
+#include <asm/bug.h>
struct plist_head {
struct list_head node_list;
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 84150a22fd7c..879c138c7b8e 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -156,9 +156,9 @@ int devm_pm_opp_set_clkname(struct device *dev, const char *name);
struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data));
void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table);
int devm_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data));
-struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs);
+struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char * const *names, struct device ***virt_devs);
void dev_pm_opp_detach_genpd(struct opp_table *opp_table);
-int devm_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs);
+int devm_pm_opp_attach_genpd(struct device *dev, const char * const *names, struct device ***virt_devs);
struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table, struct opp_table *dst_table, struct dev_pm_opp *src_opp);
int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate);
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
@@ -376,7 +376,7 @@ static inline int devm_pm_opp_set_clkname(struct device *dev, const char *name)
return -EOPNOTSUPP;
}
-static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs)
+static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char * const *names, struct device ***virt_devs)
{
return ERR_PTR(-EOPNOTSUPP);
}
@@ -384,7 +384,7 @@ static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, cons
static inline void dev_pm_opp_detach_genpd(struct opp_table *opp_table) {}
static inline int devm_pm_opp_attach_genpd(struct device *dev,
- const char **names,
+ const char * const *names,
struct device ***virt_devs)
{
return -EOPNOTSUPP;
@@ -439,7 +439,9 @@ static inline int dev_pm_opp_sync_regulators(struct device *dev)
#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
int dev_pm_opp_of_add_table(struct device *dev);
int dev_pm_opp_of_add_table_indexed(struct device *dev, int index);
+int devm_pm_opp_of_add_table_indexed(struct device *dev, int index);
int dev_pm_opp_of_add_table_noclk(struct device *dev, int index);
+int devm_pm_opp_of_add_table_noclk(struct device *dev, int index);
void dev_pm_opp_of_remove_table(struct device *dev);
int devm_pm_opp_of_add_table(struct device *dev);
int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask);
@@ -465,11 +467,21 @@ static inline int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
return -EOPNOTSUPP;
}
+static inline int devm_pm_opp_of_add_table_indexed(struct device *dev, int index)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int dev_pm_opp_of_add_table_noclk(struct device *dev, int index)
{
return -EOPNOTSUPP;
}
+static inline int devm_pm_opp_of_add_table_noclk(struct device *dev, int index)
+{
+ return -EOPNOTSUPP;
+}
+
static inline void dev_pm_opp_of_remove_table(struct device *dev)
{
}
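The devm_ variants added above pair naturally with probe(): a table added
there is dropped automatically on unbind. A hedged sketch, assuming a
hypothetical platform driver:

static int my_probe(struct platform_device *pdev)
{
	int ret;

	/* Managed: no dev_pm_opp_of_remove_table() needed in .remove. */
	ret = devm_pm_opp_of_add_table_indexed(&pdev->dev, 0);
	if (ret)
		return ret;

	return 0;
}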
diff --git a/include/linux/pm_wakeirq.h b/include/linux/pm_wakeirq.h
index cd5b62db9084..e63a63aa47a3 100644
--- a/include/linux/pm_wakeirq.h
+++ b/include/linux/pm_wakeirq.h
@@ -17,8 +17,8 @@
#ifdef CONFIG_PM
extern int dev_pm_set_wake_irq(struct device *dev, int irq);
-extern int dev_pm_set_dedicated_wake_irq(struct device *dev,
- int irq);
+extern int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq);
+extern int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq);
extern void dev_pm_clear_wake_irq(struct device *dev);
extern void dev_pm_enable_wake_irq(struct device *dev);
extern void dev_pm_disable_wake_irq(struct device *dev);
@@ -35,6 +35,11 @@ static inline int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
return 0;
}
+static inline int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
+{
+ return 0;
+}
+
static inline void dev_pm_clear_wake_irq(struct device *dev)
{
}
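For the new _reverse variant, a minimal consumer sketch (hypothetical probe
path; the variant requests the same kind of dedicated wake IRQ as
dev_pm_set_dedicated_wake_irq(), with the enable ordering relative to the
device's runtime PM callbacks reversed):

/* In a hypothetical probe(), after obtaining dev and irq: */
ret = dev_pm_set_dedicated_wake_irq_reverse(dev, irq);
if (ret)
	return ret;
/* dev_pm_clear_wake_irq(dev) undoes either variant on teardown. */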
diff --git a/include/linux/pnfs_osd_xdr.h b/include/linux/pnfs_osd_xdr.h
deleted file mode 100644
index 17d7d0d20eca..000000000000
--- a/include/linux/pnfs_osd_xdr.h
+++ /dev/null
@@ -1,317 +0,0 @@
-/*
- * pNFS-osd on-the-wire data structures
- *
- * Copyright (C) 2007 Panasas Inc. [year of first publication]
- * All rights reserved.
- *
- * Benny Halevy <bhalevy@panasas.com>
- * Boaz Harrosh <ooo@electrozaur.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * See the file COPYING included with this distribution for more details.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the Panasas company nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef __PNFS_OSD_XDR_H__
-#define __PNFS_OSD_XDR_H__
-
-#include <linux/nfs_fs.h>
-
-/*
- * draft-ietf-nfsv4-minorversion-22
- * draft-ietf-nfsv4-pnfs-obj-12
- */
-
-/* Layout Structure */
-
-enum pnfs_osd_raid_algorithm4 {
- PNFS_OSD_RAID_0 = 1,
- PNFS_OSD_RAID_4 = 2,
- PNFS_OSD_RAID_5 = 3,
- PNFS_OSD_RAID_PQ = 4 /* Reed-Solomon P+Q */
-};
-
-/* struct pnfs_osd_data_map4 {
- * uint32_t odm_num_comps;
- * length4 odm_stripe_unit;
- * uint32_t odm_group_width;
- * uint32_t odm_group_depth;
- * uint32_t odm_mirror_cnt;
- * pnfs_osd_raid_algorithm4 odm_raid_algorithm;
- * };
- */
-struct pnfs_osd_data_map {
- u32 odm_num_comps;
- u64 odm_stripe_unit;
- u32 odm_group_width;
- u32 odm_group_depth;
- u32 odm_mirror_cnt;
- u32 odm_raid_algorithm;
-};
-
-/* struct pnfs_osd_objid4 {
- * deviceid4 oid_device_id;
- * uint64_t oid_partition_id;
- * uint64_t oid_object_id;
- * };
- */
-struct pnfs_osd_objid {
- struct nfs4_deviceid oid_device_id;
- u64 oid_partition_id;
- u64 oid_object_id;
-};
-
-/* For printout. I use:
- * kprint("dev(%llx:%llx)", _DEVID_LO(pointer), _DEVID_HI(pointer));
- * BE style
- */
-#define _DEVID_LO(oid_device_id) \
- (unsigned long long)be64_to_cpup((__be64 *)(oid_device_id)->data)
-
-#define _DEVID_HI(oid_device_id) \
- (unsigned long long)be64_to_cpup(((__be64 *)(oid_device_id)->data) + 1)
-
-enum pnfs_osd_version {
- PNFS_OSD_MISSING = 0,
- PNFS_OSD_VERSION_1 = 1,
- PNFS_OSD_VERSION_2 = 2
-};
-
-struct pnfs_osd_opaque_cred {
- u32 cred_len;
- void *cred;
-};
-
-enum pnfs_osd_cap_key_sec {
- PNFS_OSD_CAP_KEY_SEC_NONE = 0,
- PNFS_OSD_CAP_KEY_SEC_SSV = 1,
-};
-
-/* struct pnfs_osd_object_cred4 {
- * pnfs_osd_objid4 oc_object_id;
- * pnfs_osd_version4 oc_osd_version;
- * pnfs_osd_cap_key_sec4 oc_cap_key_sec;
- * opaque oc_capability_key<>;
- * opaque oc_capability<>;
- * };
- */
-struct pnfs_osd_object_cred {
- struct pnfs_osd_objid oc_object_id;
- u32 oc_osd_version;
- u32 oc_cap_key_sec;
- struct pnfs_osd_opaque_cred oc_cap_key;
- struct pnfs_osd_opaque_cred oc_cap;
-};
-
-/* struct pnfs_osd_layout4 {
- * pnfs_osd_data_map4 olo_map;
- * uint32_t olo_comps_index;
- * pnfs_osd_object_cred4 olo_components<>;
- * };
- */
-struct pnfs_osd_layout {
- struct pnfs_osd_data_map olo_map;
- u32 olo_comps_index;
- u32 olo_num_comps;
- struct pnfs_osd_object_cred *olo_comps;
-};
-
-/* Device Address */
-enum pnfs_osd_targetid_type {
- OBJ_TARGET_ANON = 1,
- OBJ_TARGET_SCSI_NAME = 2,
- OBJ_TARGET_SCSI_DEVICE_ID = 3,
-};
-
-/* union pnfs_osd_targetid4 switch (pnfs_osd_targetid_type4 oti_type) {
- * case OBJ_TARGET_SCSI_NAME:
- * string oti_scsi_name<>;
- *
- * case OBJ_TARGET_SCSI_DEVICE_ID:
- * opaque oti_scsi_device_id<>;
- *
- * default:
- * void;
- * };
- *
- * union pnfs_osd_targetaddr4 switch (bool ota_available) {
- * case TRUE:
- * netaddr4 ota_netaddr;
- * case FALSE:
- * void;
- * };
- *
- * struct pnfs_osd_deviceaddr4 {
- * pnfs_osd_targetid4 oda_targetid;
- * pnfs_osd_targetaddr4 oda_targetaddr;
- * uint64_t oda_lun;
- * opaque oda_systemid<>;
- * pnfs_osd_object_cred4 oda_root_obj_cred;
- * opaque oda_osdname<>;
- * };
- */
-struct pnfs_osd_targetid {
- u32 oti_type;
- struct nfs4_string oti_scsi_device_id;
-};
-
-/* struct netaddr4 {
- * // see struct rpcb in RFC1833
- * string r_netid<>; // network id
- * string r_addr<>; // universal address
- * };
- */
-struct pnfs_osd_net_addr {
- struct nfs4_string r_netid;
- struct nfs4_string r_addr;
-};
-
-struct pnfs_osd_targetaddr {
- u32 ota_available;
- struct pnfs_osd_net_addr ota_netaddr;
-};
-
-struct pnfs_osd_deviceaddr {
- struct pnfs_osd_targetid oda_targetid;
- struct pnfs_osd_targetaddr oda_targetaddr;
- u8 oda_lun[8];
- struct nfs4_string oda_systemid;
- struct pnfs_osd_object_cred oda_root_obj_cred;
- struct nfs4_string oda_osdname;
-};
-
-/* LAYOUTCOMMIT: layoutupdate */
-
-/* union pnfs_osd_deltaspaceused4 switch (bool dsu_valid) {
- * case TRUE:
- * int64_t dsu_delta;
- * case FALSE:
- * void;
- * };
- *
- * struct pnfs_osd_layoutupdate4 {
- * pnfs_osd_deltaspaceused4 olu_delta_space_used;
- * bool olu_ioerr_flag;
- * };
- */
-struct pnfs_osd_layoutupdate {
- u32 dsu_valid;
- s64 dsu_delta;
- u32 olu_ioerr_flag;
-};
-
-/* LAYOUTRETURN: I/O Error Report */
-
-enum pnfs_osd_errno {
- PNFS_OSD_ERR_EIO = 1,
- PNFS_OSD_ERR_NOT_FOUND = 2,
- PNFS_OSD_ERR_NO_SPACE = 3,
- PNFS_OSD_ERR_BAD_CRED = 4,
- PNFS_OSD_ERR_NO_ACCESS = 5,
- PNFS_OSD_ERR_UNREACHABLE = 6,
- PNFS_OSD_ERR_RESOURCE = 7
-};
-
-/* struct pnfs_osd_ioerr4 {
- * pnfs_osd_objid4 oer_component;
- * length4 oer_comp_offset;
- * length4 oer_comp_length;
- * bool oer_iswrite;
- * pnfs_osd_errno4 oer_errno;
- * };
- */
-struct pnfs_osd_ioerr {
- struct pnfs_osd_objid oer_component;
- u64 oer_comp_offset;
- u64 oer_comp_length;
- u32 oer_iswrite;
- u32 oer_errno;
-};
-
-/* OSD XDR Client API */
-/* Layout helpers */
-/* Layout decoding is done in two parts:
- * 1. First, call pnfs_osd_xdr_decode_layout_map to read in only the header part
- * of the layout. @iter members need not be initialized.
- * Returned:
- * @layout members are set. (@layout->olo_comps set to NULL).
- *
- * Zero on success, or negative error if passed xdr is broken.
- *
- * 2. Then call pnfs_osd_xdr_decode_layout_comp() in a loop until it returns
- * false, to decode the next component.
- * Returned:
- * true if there is more to decode, or false if we are done or hit an error.
- *
- * Example:
- * struct pnfs_osd_xdr_decode_layout_iter iter;
- * struct pnfs_osd_layout layout;
- * struct pnfs_osd_object_cred comp;
- * int status;
- *
- * status = pnfs_osd_xdr_decode_layout_map(&layout, &iter, xdr);
- * if (unlikely(status))
- * goto err;
- * while (pnfs_osd_xdr_decode_layout_comp(&comp, &iter, xdr, &status)) {
- * // All of @comp strings point to inside the xdr_buffer
- * // or scratch buffer. Copy them out to user memory, e.g.:
- * copy_single_comp(dest_comp++, &comp);
- * }
- * if (unlikely(status))
- * goto err;
- */
-
-struct pnfs_osd_xdr_decode_layout_iter {
- unsigned total_comps;
- unsigned decoded_comps;
-};
-
-extern int pnfs_osd_xdr_decode_layout_map(struct pnfs_osd_layout *layout,
- struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr);
-
-extern bool pnfs_osd_xdr_decode_layout_comp(struct pnfs_osd_object_cred *comp,
- struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr,
- int *err);
-
-/* Device Info helpers */
-
-/* Note: All strings inside @deviceaddr point to space inside @p.
- * @p should stay valid while @deviceaddr is in use.
- */
-extern void pnfs_osd_xdr_decode_deviceaddr(
- struct pnfs_osd_deviceaddr *deviceaddr, __be32 *p);
-
-/* layoutupdate (layout_commit) xdr helpers */
-extern int
-pnfs_osd_xdr_encode_layoutupdate(struct xdr_stream *xdr,
- struct pnfs_osd_layoutupdate *lou);
-
-/* osd_ioerror encoding (layout_return) */
-extern __be32 *pnfs_osd_xdr_ioerr_reserve_space(struct xdr_stream *xdr);
-extern void pnfs_osd_xdr_encode_ioerr(__be32 *p, struct pnfs_osd_ioerr *ioerr);
-
-#endif /* __PNFS_OSD_XDR_H__ */
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 00fef0064355..5bbcd280bfd2 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -184,8 +184,10 @@ static inline void posix_cputimers_group_init(struct posix_cputimers *pct,
#endif
#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
+void clear_posix_cputimers_work(struct task_struct *p);
void posix_cputimers_init_work(void);
#else
+static inline void clear_posix_cputimers_work(struct task_struct *p) { }
static inline void posix_cputimers_init_work(void) { }
#endif
diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
index dd24756a8af7..c417abd2ab70 100644
--- a/include/linux/power/max17042_battery.h
+++ b/include/linux/power/max17042_battery.h
@@ -78,7 +78,7 @@ enum max17042_register {
MAX17042_T_empty = 0x34,
MAX17042_FullCAP0 = 0x35,
- MAX17042_LAvg_empty = 0x36,
+ MAX17042_IAvg_empty = 0x36,
MAX17042_FCTC = 0x37,
MAX17042_RCOMP0 = 0x38,
MAX17042_TempCo = 0x39,
@@ -221,7 +221,7 @@ struct max17042_config_data {
u16 fullcap; /* 0x10 */
u16 fullcapnom; /* 0x23 */
u16 socempty; /* 0x33 */
- u16 lavg_empty; /* 0x36 */
+ u16 iavg_empty; /* 0x36 */
u16 dqacc; /* 0x45 */
u16 dpacc; /* 0x46 */
u16 qrtbl00; /* 0x12 */
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 4d244e295e85..b4381f255a5c 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -77,6 +77,27 @@
/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>
+/**
+ * interrupt_context_level - return interrupt context level
+ *
+ * Returns the current interrupt context level.
+ * 0 - normal context
+ * 1 - softirq context
+ * 2 - hardirq context
+ * 3 - NMI context
+ */
+static __always_inline unsigned char interrupt_context_level(void)
+{
+ unsigned long pc = preempt_count();
+ unsigned char level = 0;
+
+ level += !!(pc & (NMI_MASK));
+ level += !!(pc & (NMI_MASK | HARDIRQ_MASK));
+ level += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));
+
+ return level;
+}
+
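The cumulative masks are deliberate: each test also covers every "higher"
context, so preempt_count() sampled in an NMI that interrupted a hardirq still
yields 3, a hardirq 2, a softirq 1, and task context 0. A sketch of the
intended use pattern, per-context counters similar in spirit to the perf
ring-buffer recursion accounting (the struct is invented):

struct my_ctx_stats {
	unsigned long hits[4];		/* task, softirq, hardirq, NMI */
};

static inline void my_ctx_account(struct my_ctx_stats *s)
{
	s->hits[interrupt_context_level()]++;
}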
#define nmi_count() (preempt_count() & NMI_MASK)
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#ifdef CONFIG_PREEMPT_RT
@@ -122,9 +143,10 @@
* The preempt_count offset after spin_lock()
*/
#if !defined(CONFIG_PREEMPT_RT)
-#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
+#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
#else
-#define PREEMPT_LOCK_OFFSET 0
+/* Locks on RT do not disable preemption */
+#define PREEMPT_LOCK_OFFSET 0
#endif
/*
diff --git a/include/linux/property.h b/include/linux/property.h
index 357513a977e5..88fa726a76df 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -15,6 +15,7 @@
#include <linux/types.h>
struct device;
+struct net_device;
enum dev_prop_type {
DEV_PROP_U8,
@@ -389,11 +390,7 @@ const void *device_get_match_data(struct device *dev);
int device_get_phy_mode(struct device *dev);
-void *device_get_mac_address(struct device *dev, char *addr, int alen);
-
int fwnode_get_phy_mode(struct fwnode_handle *fwnode);
-void *fwnode_get_mac_address(struct fwnode_handle *fwnode,
- char *addr, int alen);
struct fwnode_handle *fwnode_graph_get_next_endpoint(
const struct fwnode_handle *fwnode, struct fwnode_handle *prev);
struct fwnode_handle *
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index b5ebf6c01292..8aee2945ff08 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -362,29 +362,25 @@ static inline void user_single_step_report(struct pt_regs *regs)
#ifndef arch_ptrace_stop_needed
/**
* arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
- * @code: current->exit_code value ptrace will stop with
- * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
*
* This is called with the siglock held, to decide whether or not it's
- * necessary to release the siglock and call arch_ptrace_stop() with the
- * same @code and @info arguments. It can be defined to a constant if
- * arch_ptrace_stop() is never required, or always is. On machines where
- * this makes sense, it should be defined to a quick test to optimize out
- * calling arch_ptrace_stop() when it would be superfluous. For example,
- * if the thread has not been back to user mode since the last stop, the
- * thread state might indicate that nothing needs to be done.
+ * necessary to release the siglock and call arch_ptrace_stop(). It can be
+ * defined to a constant if arch_ptrace_stop() is never required, or always
+ * is. On machines where this makes sense, it should be defined to a quick
+ * test to optimize out calling arch_ptrace_stop() when it would be
+ * superfluous. For example, if the thread has not been back to user mode
+ * since the last stop, the thread state might indicate that nothing needs
+ * to be done.
*
* This is guaranteed to be invoked once before a task stops for ptrace and
* may include arch-specific operations necessary prior to a ptrace stop.
*/
-#define arch_ptrace_stop_needed(code, info) (0)
+#define arch_ptrace_stop_needed() (0)
#endif
#ifndef arch_ptrace_stop
/**
* arch_ptrace_stop - Do machine-specific work before stopping for ptrace
- * @code: current->exit_code value ptrace will stop with
- * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
*
* This is called with no locks held when arch_ptrace_stop_needed() has
* just returned nonzero. It is allowed to block, e.g. for user memory
@@ -394,7 +390,7 @@ static inline void user_single_step_report(struct pt_regs *regs)
* we only do it when the arch requires it for this particular stop, as
* indicated by arch_ptrace_stop_needed().
*/
-#define arch_ptrace_stop(code, info) do { } while (0)
+#define arch_ptrace_stop() do { } while (0)
#endif
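An architecture that does need the hook would override both macros; a hedged
sketch in which the thread flag and flush helper are invented, and only the
shape matches what the documentation above describes:

/* In a hypothetical arch header: */
#define arch_ptrace_stop_needed()	test_thread_flag(TIF_MY_FLUSH_PENDING)
#define arch_ptrace_stop()		my_arch_flush_user_state()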
#ifndef current_pt_regs
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 725c9b784e60..e6dac95e4960 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -429,16 +429,19 @@ struct pwm_device *devm_fwnode_pwm_get(struct device *dev,
#else
static inline struct pwm_device *pwm_request(int pwm_id, const char *label)
{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
static inline void pwm_free(struct pwm_device *pwm)
{
+ might_sleep();
}
static inline int pwm_apply_state(struct pwm_device *pwm,
const struct pwm_state *state)
{
+ might_sleep();
return -ENOTSUPP;
}
@@ -450,6 +453,7 @@ static inline int pwm_adjust_config(struct pwm_device *pwm)
static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
int period_ns)
{
+ might_sleep();
return -EINVAL;
}
@@ -462,11 +466,13 @@ static inline int pwm_capture(struct pwm_device *pwm,
static inline int pwm_enable(struct pwm_device *pwm)
{
+ might_sleep();
return -EINVAL;
}
static inline void pwm_disable(struct pwm_device *pwm)
{
+ might_sleep();
}
static inline int pwm_set_chip_data(struct pwm_device *pwm, void *data)
@@ -493,12 +499,14 @@ static inline struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
unsigned int index,
const char *label)
{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
static inline struct pwm_device *pwm_get(struct device *dev,
const char *consumer)
{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
@@ -506,16 +514,19 @@ static inline struct pwm_device *of_pwm_get(struct device *dev,
struct device_node *np,
const char *con_id)
{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
static inline void pwm_put(struct pwm_device *pwm)
{
+ might_sleep();
}
static inline struct pwm_device *devm_pwm_get(struct device *dev,
const char *consumer)
{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
@@ -523,6 +534,7 @@ static inline struct pwm_device *devm_of_pwm_get(struct device *dev,
struct device_node *np,
const char *con_id)
{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
@@ -530,6 +542,7 @@ static inline struct pwm_device *
devm_fwnode_pwm_get(struct device *dev, struct fwnode_handle *fwnode,
const char *con_id)
{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
#endif
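The point of sprinkling might_sleep() into the CONFIG_PWM=n stubs is that the
"may sleep" contract of the API stays enforceable even when the subsystem is
compiled out: with CONFIG_DEBUG_ATOMIC_SLEEP, a consumer calling from atomic
context now warns regardless of config. Consumer code is unchanged; a sketch:

struct pwm_device *pwm;

pwm = devm_pwm_get(dev, NULL);	/* must not be called in atomic context */
if (IS_ERR(pwm))
	return PTR_ERR(pwm);	/* -ENODEV when CONFIG_PWM=n */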
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 0a3807e927c5..827624840ee2 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2016 QLogic Corporation
- * Copyright (c) 2019-2020 Marvell International Ltd.
+ * Copyright (c) 2019-2021 Marvell International Ltd.
*/
#ifndef _COMMON_HSI_H
@@ -47,10 +47,10 @@
#define ISCSI_CDU_TASK_SEG_TYPE 0
#define FCOE_CDU_TASK_SEG_TYPE 0
#define RDMA_CDU_TASK_SEG_TYPE 1
+#define ETH_CDU_TASK_SEG_TYPE 2
#define FW_ASSERT_GENERAL_ATTN_IDX 32
-
/* Queue Zone sizes in bytes */
#define TSTORM_QZONE_SIZE 8
#define MSTORM_QZONE_SIZE 16
@@ -60,9 +60,12 @@
#define PSTORM_QZONE_SIZE 0
#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7
-#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16
-#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48
-#define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112
+#define ETH_MAX_RXQ_VF_DEFAULT 16
+#define ETH_MAX_RXQ_VF_DOUBLE 48
+#define ETH_MAX_RXQ_VF_QUAD 112
+
+#define ETH_RGSRC_CTX_SIZE 6
+#define ETH_TGSRC_CTX_SIZE 6
/********************************/
/* CORE (LIGHT L2) FW CONSTANTS */
@@ -89,8 +92,8 @@
#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 42
-#define FW_REVISION_VERSION 2
+#define FW_MINOR_VERSION 59
+#define FW_REVISION_VERSION 1
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -112,6 +115,7 @@
#define MAX_NUM_VFS (MAX_NUM_VFS_K2)
#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
+#define MAX_NUM_FUNCTIONS_K2 (MAX_NUM_PFS_K2 + MAX_NUM_VFS_K2)
#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
#define MAX_FUNCTION_NUMBER_K2 (MAX_NUM_PFS + MAX_NUM_VFS_K2)
@@ -133,7 +137,7 @@
#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1)
/* CIDs */
-#define NUM_OF_CONNECTION_TYPES_E4 (8)
+#define NUM_OF_CONNECTION_TYPES (8)
#define NUM_OF_LCIDS (320)
#define NUM_OF_LTIDS (320)
@@ -144,7 +148,7 @@
#define GTT_DWORD_SIZE BIT(GTT_DWORD_SIZE_BITS)
/* Tools Version */
-#define TOOLS_VERSION 10
+#define TOOLS_VERSION 11
/*****************/
/* CDU CONSTANTS */
@@ -162,6 +166,7 @@
#define CDU_CONTEXT_VALIDATION_CFG_USE_REGION (3)
#define CDU_CONTEXT_VALIDATION_CFG_USE_CID (4)
#define CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE (5)
+#define CDU_CONTEXT_VALIDATION_DEFAULT_CFG (0x3d)
/*****************/
/* DQ CONSTANTS */
@@ -302,6 +307,9 @@
/* PWM address mapping */
#define DQ_PWM_OFFSET_DPM_BASE 0x0
#define DQ_PWM_OFFSET_DPM_END 0x27
+#define DQ_PWM_OFFSET_XCM32_24ICID_BASE 0x28
+#define DQ_PWM_OFFSET_UCM32_24ICID_BASE 0x30
+#define DQ_PWM_OFFSET_TCM32_24ICID_BASE 0x38
#define DQ_PWM_OFFSET_XCM16_BASE 0x40
#define DQ_PWM_OFFSET_XCM32_BASE 0x44
#define DQ_PWM_OFFSET_UCM16_BASE 0x48
@@ -325,6 +333,13 @@
#define DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE \
(DQ_PWM_OFFSET_TCM32_BASE + DQ_TCM_AGG_VAL_SEL_REG9 - 4)
+#define DQ_PWM_OFFSET_XCM_RDMA_24B_ICID_SQ_PROD \
+ (DQ_PWM_OFFSET_XCM32_24ICID_BASE + 2)
+#define DQ_PWM_OFFSET_UCM_RDMA_24B_ICID_CQ_CONS_32BIT \
+ (DQ_PWM_OFFSET_UCM32_24ICID_BASE + 4)
+#define DQ_PWM_OFFSET_TCM_ROCE_24B_ICID_RQ_PROD \
+ (DQ_PWM_OFFSET_TCM32_24ICID_BASE + 1)
+
#define DQ_REGION_SHIFT (12)
/* DPM */
@@ -360,6 +375,7 @@
/* Number of global Vport/QCN rate limiters */
#define MAX_QM_GLOBAL_RLS 256
+#define COMMON_MAX_QM_GLOBAL_RLS MAX_QM_GLOBAL_RLS
/* QM registers data */
#define QM_LINE_CRD_REG_WIDTH 16
@@ -379,7 +395,7 @@
#define CAU_FSM_ETH_TX 1
/* Number of Protocol Indices per Status Block */
-#define PIS_PER_SB_E4 12
+#define PIS_PER_SB 12
#define MAX_PIS_PER_SB PIS_PER_SB
#define CAU_HC_STOPPED_STATE 3
@@ -700,6 +716,13 @@ enum mf_mode {
MAX_MF_MODE
};
+/* Per-protocol packet duplication enable bit vector. If set, duplicate
+ * offloaded traffic to the LL2 debug queue.
+ */
+struct offload_pkt_dup_enable {
+ __le16 enable_vector;
+};
+
/* Per-protocol connection types */
enum protocol_type {
PROTOCOLID_TCP_ULP,
@@ -717,6 +740,12 @@ enum protocol_type {
MAX_PROTOCOL_TYPE
};
+/* Pstorm packet duplication config */
+struct pstorm_pkt_dup_cfg {
+ struct offload_pkt_dup_enable enable;
+ __le16 reserved[3];
+};
+
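A sketch of selecting traffic for duplication (assumed usage; BIT() and
cpu_to_le16() are standard kernel helpers, and PROTOCOLID_TCP_ULP is taken
from the enum above):

struct pstorm_pkt_dup_cfg dup_cfg = {};

/* Duplicate offloaded TCP-ULP traffic to the LL2 debug queue. */
dup_cfg.enable.enable_vector = cpu_to_le16(BIT(PROTOCOLID_TCP_ULP));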
struct regpair {
__le32 lo;
__le32 hi;
@@ -728,10 +757,24 @@ struct rdma_eqe_destroy_qp {
u8 reserved[4];
};
+/* RoCE Suspend Event Data */
+struct rdma_eqe_suspend_qp {
+ __le32 cid;
+ u8 reserved[4];
+};
+
/* RDMA Event Data Union */
union rdma_eqe_data {
struct regpair async_handle;
struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
+ struct rdma_eqe_suspend_qp rdma_suspend_qp_data;
+};
+
+/* Tstorm packet duplication config */
+struct tstorm_pkt_dup_cfg {
+ struct offload_pkt_dup_enable enable;
+ __le16 reserved;
+ __le32 cid;
};
struct tstorm_queue_zone {
@@ -891,6 +934,15 @@ struct db_legacy_addr {
#define DB_LEGACY_ADDR_ICID_SHIFT 5
};
+/* Structure for doorbell address, in legacy mode, without DEMS */
+struct db_legacy_wo_dems_addr {
+ __le32 addr;
+#define DB_LEGACY_WO_DEMS_ADDR_RESERVED0_MASK 0x3
+#define DB_LEGACY_WO_DEMS_ADDR_RESERVED0_SHIFT 0
+#define DB_LEGACY_WO_DEMS_ADDR_ICID_MASK 0x3FFFFFFF
+#define DB_LEGACY_WO_DEMS_ADDR_ICID_SHIFT 2
+};
+
/* Structure for doorbell address, in PWM mode */
struct db_pwm_addr {
__le32 addr;
@@ -907,6 +959,31 @@ struct db_pwm_addr {
};
/* Parameters to RDMA firmware, passed in EDPM doorbell */
+struct db_rdma_24b_icid_dpm_params {
+ __le32 params;
+#define DB_RDMA_24B_ICID_DPM_PARAMS_SIZE_MASK 0x3F
+#define DB_RDMA_24B_ICID_DPM_PARAMS_SIZE_SHIFT 0
+#define DB_RDMA_24B_ICID_DPM_PARAMS_DPM_TYPE_MASK 0x3
+#define DB_RDMA_24B_ICID_DPM_PARAMS_DPM_TYPE_SHIFT 6
+#define DB_RDMA_24B_ICID_DPM_PARAMS_OPCODE_MASK 0xFF
+#define DB_RDMA_24B_ICID_DPM_PARAMS_OPCODE_SHIFT 8
+#define DB_RDMA_24B_ICID_DPM_PARAMS_ICID_EXT_MASK 0xFF
+#define DB_RDMA_24B_ICID_DPM_PARAMS_ICID_EXT_SHIFT 16
+#define DB_RDMA_24B_ICID_DPM_PARAMS_INV_BYTE_CNT_MASK 0x7
+#define DB_RDMA_24B_ICID_DPM_PARAMS_INV_BYTE_CNT_SHIFT 24
+#define DB_RDMA_24B_ICID_DPM_PARAMS_EXT_ICID_MODE_EN_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_EXT_ICID_MODE_EN_SHIFT 27
+#define DB_RDMA_24B_ICID_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
+#define DB_RDMA_24B_ICID_DPM_PARAMS_S_FLG_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_S_FLG_SHIFT 29
+#define DB_RDMA_24B_ICID_DPM_PARAMS_RESERVED1_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_RESERVED1_SHIFT 30
+#define DB_RDMA_24B_ICID_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
+};
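Composing the parameter dword follows the usual qed mask/shift pattern; a
sketch assuming the SET_FIELD() helper from this header family, with wqe_size
a hypothetical input:

struct db_rdma_24b_icid_dpm_params dpm;
u32 params = 0;

SET_FIELD(params, DB_RDMA_24B_ICID_DPM_PARAMS_SIZE, wqe_size);
SET_FIELD(params, DB_RDMA_24B_ICID_DPM_PARAMS_EXT_ICID_MODE_EN, 1);
dpm.params = cpu_to_le32(params);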
+
+/* Parameters to RDMA firmware, passed in EDPM doorbell */
struct db_rdma_dpm_params {
__le32 params;
#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F
@@ -1220,21 +1297,41 @@ struct rdif_task_context {
__le32 reserved2;
};
+/* Searcher Table struct */
+struct src_entry_header {
+ __le32 flags;
+#define SRC_ENTRY_HEADER_NEXT_PTR_TYPE_MASK 0x1
+#define SRC_ENTRY_HEADER_NEXT_PTR_TYPE_SHIFT 0
+#define SRC_ENTRY_HEADER_EMPTY_MASK 0x1
+#define SRC_ENTRY_HEADER_EMPTY_SHIFT 1
+#define SRC_ENTRY_HEADER_RESERVED_MASK 0x3FFFFFFF
+#define SRC_ENTRY_HEADER_RESERVED_SHIFT 2
+ __le32 magic_number;
+ struct regpair next_ptr;
+};
+
+/* Enumeration for address type */
+enum src_header_next_ptr_type_enum {
+ e_physical_addr,
+ e_logical_addr,
+ MAX_SRC_HEADER_NEXT_PTR_TYPE_ENUM
+};
+
/* Status block structure */
-struct status_block_e4 {
- __le16 pi_array[PIS_PER_SB_E4];
+struct status_block {
+ __le16 pi_array[PIS_PER_SB];
__le32 sb_num;
-#define STATUS_BLOCK_E4_SB_NUM_MASK 0x1FF
-#define STATUS_BLOCK_E4_SB_NUM_SHIFT 0
-#define STATUS_BLOCK_E4_ZERO_PAD_MASK 0x7F
-#define STATUS_BLOCK_E4_ZERO_PAD_SHIFT 9
-#define STATUS_BLOCK_E4_ZERO_PAD2_MASK 0xFFFF
-#define STATUS_BLOCK_E4_ZERO_PAD2_SHIFT 16
+#define STATUS_BLOCK_SB_NUM_MASK 0x1FF
+#define STATUS_BLOCK_SB_NUM_SHIFT 0
+#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F
+#define STATUS_BLOCK_ZERO_PAD_SHIFT 9
+#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF
+#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16
__le32 prod_index;
-#define STATUS_BLOCK_E4_PROD_INDEX_MASK 0xFFFFFF
-#define STATUS_BLOCK_E4_PROD_INDEX_SHIFT 0
-#define STATUS_BLOCK_E4_ZERO_PAD3_MASK 0xFF
-#define STATUS_BLOCK_E4_ZERO_PAD3_SHIFT 24
+#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF
+#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
+#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF
+#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24
};
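The E4 suffix drop is purely a rename; the layout is unchanged. Field
extraction still goes through the mask/shift pairs, e.g. (assuming the
GET_FIELD() helper from this header family):

static u32 sb_prod_index(const struct status_block *sb)
{
	return GET_FIELD(le32_to_cpu(sb->prod_index), STATUS_BLOCK_PROD_INDEX);
}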
/* Tdif context */
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index cd1207ad4ada..c84e08bc6802 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -67,6 +67,7 @@
/* Ethernet vport update constants */
#define ETH_FILTER_RULES_COUNT 10
#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128
+#define ETH_RSS_IND_TABLE_MASK_SIZE_REGS (ETH_RSS_IND_TABLE_ENTRIES_NUM / 32)
#define ETH_RSS_KEY_SIZE_REGS 10
#define ETH_RSS_ENGINE_NUM_K2 207
#define ETH_RSS_ENGINE_NUM_BB 127
diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h
index 68eda1c21cde..7ba0abc867f1 100644
--- a/include/linux/qed/fcoe_common.h
+++ b/include/linux/qed/fcoe_common.h
@@ -150,49 +150,49 @@ struct ystorm_fcoe_task_st_ctx {
u8 reserved2[8];
};
-struct e4_ystorm_fcoe_task_ag_ctx {
+struct ystorm_fcoe_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 word0;
u8 flags0;
-#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
+#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4
+#define YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
+#define YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
+#define YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
+#define YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0
+#define YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
+#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6
+#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0
+#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 byte2;
__le32 reg0;
u8 byte3;
@@ -206,73 +206,73 @@ struct e4_ystorm_fcoe_task_ag_ctx {
__le32 reg2;
};
-struct e4_tstorm_fcoe_task_ag_ctx {
+struct tstorm_fcoe_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 icid;
u8 flags0;
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6
-#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7
+#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7
u8 flags1;
-#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0
-#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2
-#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6
u8 flags2;
-#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6
u8 flags3;
-#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0
-#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2
-#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4
-#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3
+#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0
-#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7
u8 cleanup_state;
__le16 last_sent_tid;
__le32 rec_rr_tov_exp_timeout;
@@ -352,49 +352,49 @@ struct tstorm_fcoe_task_st_ctx {
struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only;
};
-struct e4_mstorm_fcoe_task_ag_ctx {
+struct mstorm_fcoe_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 icid;
u8 flags0;
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5
-#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
-#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
+#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5
+#define MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
+#define MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0
+#define MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
+#define MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
+#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0
+#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6
+#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 cleanup_state;
__le32 received_bytes;
u8 byte3;
@@ -440,56 +440,56 @@ struct mstorm_fcoe_task_st_ctx {
struct scsi_cached_sges data_desc;
};
-struct e4_ustorm_fcoe_task_ag_ctx {
+struct ustorm_fcoe_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 icid;
u8 flags0;
-#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6
+#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+#define USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2
+#define USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
u8 flags2;
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7
+#define USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5
+#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2
+#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
__le32 dif_err_intervals;
__le32 dif_error_1st_interval;
__le32 global_cq_num;
@@ -499,18 +499,18 @@ struct e4_ustorm_fcoe_task_ag_ctx {
};
/* FCoE task context */
-struct e4_fcoe_task_context {
+struct fcoe_task_context {
struct ystorm_fcoe_task_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct tdif_task_context tdif_context;
- struct e4_ystorm_fcoe_task_ag_ctx ystorm_ag_context;
- struct e4_tstorm_fcoe_task_ag_ctx tstorm_ag_context;
+ struct ystorm_fcoe_task_ag_ctx ystorm_ag_context;
+ struct tstorm_fcoe_task_ag_ctx tstorm_ag_context;
struct timers_context timer_context;
struct tstorm_fcoe_task_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
- struct e4_mstorm_fcoe_task_ag_ctx mstorm_ag_context;
+ struct mstorm_fcoe_task_ag_ctx mstorm_ag_context;
struct mstorm_fcoe_task_st_ctx mstorm_st_context;
- struct e4_ustorm_fcoe_task_ag_ctx ustorm_ag_context;
+ struct ustorm_fcoe_task_ag_ctx ustorm_ag_context;
struct rdif_task_context rdif_context;
};
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index 157019f716f1..1a60285a01e3 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -714,49 +714,49 @@ struct ystorm_iscsi_task_st_ctx {
union iscsi_task_hdr pdu_hdr;
};
-struct e4_ystorm_iscsi_task_ag_ctx {
+struct ystorm_iscsi_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 word0;
u8 flags0;
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_MASK 0x1 /* bit3 */
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_SHIFT 7
+#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
+#define YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_MASK 0x1 /* bit3 */
+#define YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_SHIFT 7
u8 flags1;
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
+#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 byte2;
__le32 TTT;
u8 byte3;
@@ -764,49 +764,49 @@ struct e4_ystorm_iscsi_task_ag_ctx {
__le16 word1;
};
-struct e4_mstorm_iscsi_task_ag_ctx {
+struct mstorm_iscsi_task_ag_ctx {
u8 cdu_validation;
u8 byte1;
__le16 task_cid;
u8 flags0;
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
+#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
+#define MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
u8 flags1;
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 byte2;
__le32 reg0;
u8 byte3;
@@ -814,56 +814,56 @@ struct e4_mstorm_iscsi_task_ag_ctx {
__le16 word1;
};
-struct e4_ustorm_iscsi_task_ag_ctx {
+struct ustorm_iscsi_task_ag_ctx {
u8 reserved;
u8 state;
__le16 icid;
u8 flags0;
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
-#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
+#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
u8 flags1;
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0
-#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2
+#define USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
u8 flags2;
-#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
+#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
+#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2
+#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
__le32 dif_err_intervals;
__le32 dif_error_1st_interval;
__le32 rcv_cont_len;
@@ -952,14 +952,14 @@ struct ustorm_iscsi_task_st_ctx {
};
/* iscsi task context */
-struct e4_iscsi_task_context {
+struct iscsi_task_context {
struct ystorm_iscsi_task_st_ctx ystorm_st_context;
- struct e4_ystorm_iscsi_task_ag_ctx ystorm_ag_context;
+ struct ystorm_iscsi_task_ag_ctx ystorm_ag_context;
struct regpair ystorm_ag_padding[2];
struct tdif_task_context tdif_context;
- struct e4_mstorm_iscsi_task_ag_ctx mstorm_ag_context;
+ struct mstorm_iscsi_task_ag_ctx mstorm_ag_context;
struct regpair mstorm_ag_padding[2];
- struct e4_ustorm_iscsi_task_ag_ctx ustorm_ag_context;
+ struct ustorm_iscsi_task_ag_ctx ustorm_ag_context;
struct mstorm_iscsi_task_st_ctx mstorm_st_context;
struct ustorm_iscsi_task_st_ctx ustorm_st_context;
struct rdif_task_context rdif_context;
@@ -1431,73 +1431,73 @@ struct ystorm_iscsi_stats_drv {
struct regpair iscsi_tx_tcp_pkt_cnt;
};
-struct e4_tstorm_iscsi_task_ag_ctx {
+struct tstorm_iscsi_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 word0;
u8 flags0;
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
+#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6
u8 flags2;
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6
u8 flags3;
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7
u8 byte2;
__le16 word1;
__le32 reg0;
diff --git a/include/linux/qed/nvmetcp_common.h b/include/linux/qed/nvmetcp_common.h
index 5a2ab0606308..cc7c7481a0e0 100644
--- a/include/linux/qed/nvmetcp_common.h
+++ b/include/linux/qed/nvmetcp_common.h
@@ -410,7 +410,7 @@ struct e5_ystorm_nvmetcp_task_ag_ctx {
u8 byte2;
u8 byte3;
u8 byte4;
- u8 e4_reserved7;
+ u8 reserved7;
};
struct e5_mstorm_nvmetcp_task_ag_ctx {
@@ -445,7 +445,7 @@ struct e5_mstorm_nvmetcp_task_ag_ctx {
u8 byte2;
u8 byte3;
u8 byte4;
- u8 e4_reserved7;
+ u8 reserved7;
};
struct e5_ustorm_nvmetcp_task_ag_ctx {
@@ -489,17 +489,17 @@ struct e5_ustorm_nvmetcp_task_ag_ctx {
#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
u8 flags3;
u8 flags4;
-#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED5_MASK 0x3
-#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED5_SHIFT 0
-#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED6_MASK 0x1
-#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED6_SHIFT 2
-#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED7_MASK 0x1
-#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED7_SHIFT 3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED5_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED5_SHIFT 0
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED6_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED6_SHIFT 2
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED7_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED7_SHIFT 3
#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
u8 byte2;
u8 byte3;
- u8 e4_reserved8;
+ u8 reserved8;
__le32 dif_err_intervals;
__le32 dif_error_1st_interval;
__le32 rcv_cont_len;
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index f34dbd0db795..a84063492c71 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -268,14 +268,15 @@ static inline dma_addr_t qed_chain_get_pbl_phys(const struct qed_chain *chain)
}
/**
- * @brief qed_chain_advance_page -
+ * qed_chain_advance_page(): Advance the next element across pages for a
+ * linked chain.
*
- * Advance the next element across pages for a linked chain
+ * @p_chain: Chain to advance.
+ * @p_next_elem: Pointer to the next-element pointer to update.
+ * @idx_to_inc: Element index to increment.
+ * @page_to_inc: Page index to increment.
*
- * @param p_chain
- * @param p_next_elem
- * @param idx_to_inc
- * @param page_to_inc
+ * Return: Void.
*/
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
@@ -336,12 +337,14 @@ qed_chain_advance_page(struct qed_chain *p_chain,
} while (0)
/**
- * @brief qed_chain_return_produced -
+ * qed_chain_return_produced(): A chain in which the driver "Produces"
+ * elements should use this API
+ * to indicate previously produced elements
+ * are now consumed.
*
- * A chain in which the driver "Produces" elements should use this API
- * to indicate previous produced elements are now consumed.
+ * @p_chain: Chain.
*
- * @param p_chain
+ * Return: Void.
*/
static inline void qed_chain_return_produced(struct qed_chain *p_chain)
{
@@ -353,15 +356,15 @@ static inline void qed_chain_return_produced(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_produce -
+ * qed_chain_produce(): A chain in which the driver "Produces"
+ * elements should use this to get a pointer to
+ * the next element which can be "Produced". It's driver
+ * responsibility to validate that the chain has room for
+ * new element.
*
- * A chain in which the driver "Produces" elements should use this to get
- * a pointer to the next element which can be "Produced". It's driver
- * responsibility to validate that the chain has room for new element.
+ * @p_chain: Chain.
*
- * @param p_chain
- *
- * @return void*, a pointer to next element
+ * Return: void*, a pointer to the next element.
*/
static inline void *qed_chain_produce(struct qed_chain *p_chain)
{
@@ -395,14 +398,11 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain)
}
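A hedged sketch of the produce-side pattern described above; the element
type and fill helper are assumptions, not taken from this header.

/* The caller must already have verified the chain has room. */
struct eth_tx_bd *bd = qed_chain_produce(p_chain);

fill_tx_bd(bd, skb);	/* hypothetical: driver fills the new element */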
/**
- * @brief qed_chain_get_capacity -
- *
- * Get the maximum number of BDs in chain
+ * qed_chain_get_capacity(): Get the maximum number of BDs in chain.
*
- * @param p_chain
- * @param num
+ * @p_chain: Chain.
*
- * @return number of unusable BDs
+ * Return: Number of usable BDs.
*/
static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
{
@@ -410,12 +410,14 @@ static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_recycle_consumed -
+ * qed_chain_recycle_consumed(): Returns an element which was
+ * previously consumed;
+ * Increments producers so they could
+ * be written to FW.
*
- * Returns an element which was previously consumed;
- * Increments producers so they could be written to FW.
+ * @p_chain: Chain.
*
- * @param p_chain
+ * Return: Void.
*/
static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
{
@@ -427,14 +429,13 @@ static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_consume -
+ * qed_chain_consume(): A Chain in which the driver utilizes data written
+ * by a different source (i.e., FW) should use this to
+ * access passed buffers.
*
- * A Chain in which the driver utilizes data written by a different source
- * (i.e., FW) should use this to access passed buffers.
+ * @p_chain: Chain.
*
- * @param p_chain
- *
- * @return void*, a pointer to the next buffer written
+ * Return: void*, a pointer to the next buffer written.
*/
static inline void *qed_chain_consume(struct qed_chain *p_chain)
{
@@ -468,9 +469,11 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain)
}
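A hedged sketch pairing qed_chain_consume() with
qed_chain_recycle_consumed(); completions_pending() and handle_cqe() are
hypothetical helpers.

while (completions_pending(p_chain)) {
	void *cqe = qed_chain_consume(p_chain);

	handle_cqe(cqe);
}
qed_chain_recycle_consumed(p_chain);	/* hand the slots back to FW */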
/**
- * @brief qed_chain_reset - Resets the chain to its start state
+ * qed_chain_reset(): Resets the chain to its start state.
+ *
+ * @p_chain: pointer to a previously allocated chain.
*
- * @param p_chain pointer to a previously allocated chain
+ * Return: Void.
*/
static inline void qed_chain_reset(struct qed_chain *p_chain)
{
@@ -519,13 +522,12 @@ static inline void qed_chain_reset(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_get_last_elem -
+ * qed_chain_get_last_elem(): Returns a pointer to the last element of the
+ * chain.
*
- * Returns a pointer to the last element of the chain
+ * @p_chain: Chain.
*
- * @param p_chain
- *
- * @return void*
+ * Return: void*, the last element of the chain.
*/
static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
{
@@ -563,10 +565,13 @@ out:
}
/**
- * @brief qed_chain_set_prod - sets the prod to the given value
+ * qed_chain_set_prod(): sets the prod to the given value.
+ *
+ * @p_chain: Chain.
+ * @prod_idx: Prod Idx.
+ * @p_prod_elem: Prod elem.
*
- * @param prod_idx
- * @param p_prod_elem
+ * Return: Void.
*/
static inline void qed_chain_set_prod(struct qed_chain *p_chain,
u32 prod_idx, void *p_prod_elem)
@@ -610,9 +615,11 @@ static inline void qed_chain_set_prod(struct qed_chain *p_chain,
}
/**
- * @brief qed_chain_pbl_zero_mem - set chain memory to 0
+ * qed_chain_pbl_zero_mem(): set chain memory to 0.
+ *
+ * @p_chain: Chain.
*
- * @param p_chain
+ * Return: Void.
*/
static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
{
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 812a4d751163..e1bf3219b4e6 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -145,12 +145,6 @@ struct qed_filter_mcast_params {
unsigned char mac[64][ETH_ALEN];
};
-union qed_filter_type_params {
- enum qed_filter_rx_mode_type accept_flags;
- struct qed_filter_ucast_params ucast;
- struct qed_filter_mcast_params mcast;
-};
-
enum qed_filter_type {
QED_FILTER_TYPE_UCAST,
QED_FILTER_TYPE_MCAST,
@@ -158,11 +152,6 @@ enum qed_filter_type {
QED_MAX_FILTER_TYPES,
};
-struct qed_filter_params {
- enum qed_filter_type type;
- union qed_filter_type_params filter;
-};
-
struct qed_tunn_params {
u16 vxlan_port;
u8 update_vxlan_port;
@@ -314,8 +303,14 @@ struct qed_eth_ops {
int (*q_tx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle);
- int (*filter_config)(struct qed_dev *cdev,
- struct qed_filter_params *params);
+ int (*filter_config_rx_mode)(struct qed_dev *cdev,
+ enum qed_filter_rx_mode_type type);
+
+ int (*filter_config_ucast)(struct qed_dev *cdev,
+ struct qed_filter_ucast_params *params);
+
+ int (*filter_config_mcast)(struct qed_dev *cdev,
+ struct qed_filter_mcast_params *params);
int (*fastpath_stop)(struct qed_dev *cdev);
@@ -336,7 +331,7 @@ struct qed_eth_ops {
int (*configure_arfs_searcher)(struct qed_dev *cdev,
enum qed_filter_config_mode mode);
int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle);
- int (*req_bulletin_update_mac)(struct qed_dev *cdev, u8 *mac);
+ int (*req_bulletin_update_mac)(struct qed_dev *cdev, const u8 *mac);
};
const struct qed_eth_ops *qed_get_eth_ops(void);
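A hedged sketch of how a protocol driver might call the split filter ops
that replace the removed union-based filter_config(); the rx-mode
enumerator and surrounding code are assumptions.

static int example_set_promisc(const struct qed_eth_ops *ops,
			       struct qed_dev *cdev)
{
	return ops->filter_config_rx_mode(cdev,
					  QED_FILTER_RX_MODE_TYPE_PROMISC);
}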
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 850b98991670..0dae7fcc5ef2 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -24,6 +24,9 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <net/devlink.h>
+#define QED_TX_SWS_TIMER_DFLT 500
+#define QED_TWO_MSL_TIMER_DFLT 4000
+
enum dcbx_protocol_type {
DCBX_PROTOCOL_ISCSI,
DCBX_PROTOCOL_FCOE,
@@ -588,7 +591,7 @@ enum qed_int_mode {
};
struct qed_sb_info {
- struct status_block_e4 *sb_virt;
+ struct status_block *sb_virt;
dma_addr_t sb_phys;
u32 sb_ack; /* Last given ack */
u16 igu_sb_id;
@@ -613,7 +616,6 @@ enum qed_hw_err_type {
enum qed_dev_type {
QED_DEV_TYPE_BB,
QED_DEV_TYPE_AH,
- QED_DEV_TYPE_E5,
};
struct qed_dev_info {
@@ -819,47 +821,47 @@ struct qed_common_cb_ops {
struct qed_selftest_ops {
/**
- * @brief selftest_interrupt - Perform interrupt test
+ * selftest_interrupt(): Perform interrupt test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_interrupt)(struct qed_dev *cdev);
/**
- * @brief selftest_memory - Perform memory test
+ * selftest_memory(): Perform memory test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_memory)(struct qed_dev *cdev);
/**
- * @brief selftest_register - Perform register test
+ * selftest_register(): Perform register test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_register)(struct qed_dev *cdev);
/**
- * @brief selftest_clock - Perform clock test
+ * selftest_clock(): Perform clock test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_clock)(struct qed_dev *cdev);
/**
- * @brief selftest_nvram - Perform nvram test
+ * selftest_nvram(): Perform nvram test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_nvram) (struct qed_dev *cdev);
};
@@ -927,47 +929,53 @@ struct qed_common_ops {
enum qed_hw_err_type err_type);
/**
- * @brief can_link_change - can the instance change the link or not
+ * can_link_change(): Check whether the instance can change the link.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return true if link-change is allowed, false otherwise.
+ * Return: true if link-change is allowed, false otherwise.
*/
bool (*can_link_change)(struct qed_dev *cdev);
/**
- * @brief set_link - set links according to params
+ * set_link(): Set link according to params.
*
- * @param cdev
- * @param params - values used to override the default link configuration
+ * @cdev: Qed dev pointer.
+ * @params: values used to override the default link configuration.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*set_link)(struct qed_dev *cdev,
struct qed_link_params *params);
/**
- * @brief get_link - returns the current link state.
+ * get_link(): returns the current link state.
+ *
+ * @cdev: Qed dev pointer.
+ * @if_link: structure to be filled with current link configuration.
*
- * @param cdev
- * @param if_link - structure to be filled with current link configuration.
+ * Return: Void.
*/
void (*get_link)(struct qed_dev *cdev,
struct qed_link_output *if_link);
/**
- * @brief - drains chip in case Tx completions fail to arrive due to pause.
+ * drain(): drains chip in case Tx completions fail to arrive due to pause.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
+ *
+ * Return: Int.
*/
int (*drain)(struct qed_dev *cdev);
/**
- * @brief update_msglvl - update module debug level
+ * update_msglvl(): update module debug level.
+ *
+ * @cdev: Qed dev pointer.
+ * @dp_module: Debug module.
+ * @dp_level: Debug level.
*
- * @param cdev
- * @param dp_module
- * @param dp_level
+ * Return: Void.
*/
void (*update_msglvl)(struct qed_dev *cdev,
u32 dp_module,
@@ -981,70 +989,73 @@ struct qed_common_ops {
struct qed_chain *p_chain);
/**
- * @brief nvm_flash - Flash nvm data.
+ * nvm_flash(): Flash nvm data.
*
- * @param cdev
- * @param name - file containing the data
+ * @cdev: Qed dev pointer.
+ * @name: file containing the data.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*nvm_flash)(struct qed_dev *cdev, const char *name);
/**
- * @brief nvm_get_image - reads an entire image from nvram
+ * nvm_get_image(): reads an entire image from nvram.
*
- * @param cdev
- * @param type - type of the request nvram image
- * @param buf - preallocated buffer to fill with the image
- * @param len - length of the allocated buffer
+ * @cdev: Qed dev pointer.
+ * @type: type of the request nvram image.
+ * @buf: preallocated buffer to fill with the image.
+ * @len: length of the allocated buffer.
*
- * @return 0 on success, error otherwise
+ * Return: 0 on success, error otherwise.
*/
int (*nvm_get_image)(struct qed_dev *cdev,
enum qed_nvm_images type, u8 *buf, u16 len);
/**
- * @brief set_coalesce - Configure Rx coalesce value in usec
+ * set_coalesce(): Configure Rx coalesce value in usec.
*
- * @param cdev
- * @param rx_coal - Rx coalesce value in usec
- * @param tx_coal - Tx coalesce value in usec
- * @param qid - Queue index
- * @param sb_id - Status Block Id
+ * @cdev: Qed dev pointer.
+ * @rx_coal: Rx coalesce value in usec.
+ * @tx_coal: Tx coalesce value in usec.
+ * @handle: Queue handle.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*set_coalesce)(struct qed_dev *cdev,
u16 rx_coal, u16 tx_coal, void *handle);
/**
- * @brief set_led - Configure LED mode
+ * set_led(): Configure LED mode.
*
- * @param cdev
- * @param mode - LED mode
+ * @cdev: Qed dev pointer.
+ * @mode: LED mode.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*set_led)(struct qed_dev *cdev,
enum qed_led_mode mode);
/**
- * @brief attn_clr_enable - Prevent attentions from being reasserted
+ * attn_clr_enable(): Prevent attentions from being reasserted.
*
- * @param cdev
- * @param clr_enable
+ * @cdev: Qed dev pointer.
+ * @clr_enable: Clear enable.
+ *
+ * Return: Void.
*/
void (*attn_clr_enable)(struct qed_dev *cdev, bool clr_enable);
/**
- * @brief db_recovery_add - add doorbell information to the doorbell
- * recovery mechanism.
+ * db_recovery_add(): add doorbell information to the doorbell
+ * recovery mechanism.
+ *
+ * @cdev: Qed dev pointer.
+ * @db_addr: Doorbell address.
+ * @db_data: Address of where db_data is stored.
+ * @db_width: Doorbell is 32b or 64b.
+ * @db_space: Doorbell recovery addresses are user or kernel space.
*
- * @param cdev
- * @param db_addr - doorbell address
- * @param db_data - address of where db_data is stored
- * @param db_is_32b - doorbell is 32b pr 64b
- * @param db_is_user - doorbell recovery addresses are user or kernel space
+ * Return: Int.
*/
int (*db_recovery_add)(struct qed_dev *cdev,
void __iomem *db_addr,
@@ -1053,114 +1064,130 @@ struct qed_common_ops {
enum qed_db_rec_space db_space);
/**
- * @brief db_recovery_del - remove doorbell information from the doorbell
+ * db_recovery_del(): remove doorbell information from the doorbell
* recovery mechanism. db_data serves as key (db_addr is not unique).
*
- * @param cdev
- * @param db_addr - doorbell address
- * @param db_data - address where db_data is stored. Serves as key for the
- * entry to delete.
+ * @cdev: Qed dev pointer.
+ * @db_addr: Doorbell address.
+ * @db_data: Address where db_data is stored. Serves as key for the
+ * entry to delete.
+ *
+ * Return: Int.
*/
int (*db_recovery_del)(struct qed_dev *cdev,
void __iomem *db_addr, void *db_data);
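A hedged sketch pairing db_recovery_add()/db_recovery_del() around a
queue's lifetime; DB_REC_WIDTH_32B/DB_REC_KERNEL are assumed to be the
existing qed enumerators, and db_addr/db_data are placeholders.

rc = ops->common->db_recovery_add(cdev, db_addr, &db_data,
				  DB_REC_WIDTH_32B, DB_REC_KERNEL);
if (rc)
	goto err;
/* ... queue is in use; db_data serves as the key on deletion ... */
ops->common->db_recovery_del(cdev, db_addr, &db_data);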
/**
- * @brief recovery_process - Trigger a recovery process
+ * recovery_process(): Trigger a recovery process.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*recovery_process)(struct qed_dev *cdev);
/**
- * @brief recovery_prolog - Execute the prolog operations of a recovery process
+ * recovery_prolog(): Execute the prolog operations of a recovery process.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*recovery_prolog)(struct qed_dev *cdev);
/**
- * @brief update_drv_state - API to inform the change in the driver state.
+ * update_drv_state(): API to inform the change in the driver state.
*
- * @param cdev
- * @param active
+ * @cdev: Qed dev pointer.
+ * @active: Whether the driver is active.
*
+ * Return: Int.
*/
int (*update_drv_state)(struct qed_dev *cdev, bool active);
/**
- * @brief update_mac - API to inform the change in the mac address
+ * update_mac(): API to inform the change in the mac address.
*
- * @param cdev
- * @param mac
+ * @cdev: Qed dev pointer.
+ * @mac: MAC address.
*
+ * Return: Int.
*/
- int (*update_mac)(struct qed_dev *cdev, u8 *mac);
+ int (*update_mac)(struct qed_dev *cdev, const u8 *mac);
/**
- * @brief update_mtu - API to inform the change in the mtu
+ * update_mtu(): API to inform the change in the mtu.
*
- * @param cdev
- * @param mtu
+ * @cdev: Qed dev pointer.
+ * @mtu: New MTU value.
*
+ * Return: Int.
*/
int (*update_mtu)(struct qed_dev *cdev, u16 mtu);
/**
- * @brief update_wol - update of changes in the WoL configuration
+ * update_wol(): Update of changes in the WoL configuration.
*
- * @param cdev
- * @param enabled - true iff WoL should be enabled.
+ * @cdev: Qed dev pointer.
+ * @enabled: true iff WoL should be enabled.
+ *
+ * Return: Int.
*/
int (*update_wol) (struct qed_dev *cdev, bool enabled);
/**
- * @brief read_module_eeprom
+ * read_module_eeprom(): Read EEPROM.
+ *
+ * @cdev: Qed dev pointer.
+ * @buf: buffer.
+ * @dev_addr: PHY device memory region.
+ * @offset: offset into eeprom contents to be read.
+ * @len: buffer length, i.e., max bytes to be read.
*
- * @param cdev
- * @param buf - buffer
- * @param dev_addr - PHY device memory region
- * @param offset - offset into eeprom contents to be read
- * @param len - buffer length, i.e., max bytes to be read
+ * Return: Int.
*/
int (*read_module_eeprom)(struct qed_dev *cdev,
char *buf, u8 dev_addr, u32 offset, u32 len);
/**
- * @brief get_affin_hwfn_idx
+ * get_affin_hwfn_idx(): Get affine HW function.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
+ *
+ * Return: u8, index of the affine HW function.
*/
u8 (*get_affin_hwfn_idx)(struct qed_dev *cdev);
/**
- * @brief read_nvm_cfg - Read NVM config attribute value.
- * @param cdev
- * @param buf - buffer
- * @param cmd - NVM CFG command id
- * @param entity_id - Entity id
+ * read_nvm_cfg(): Read NVM config attribute value.
+ *
+ * @cdev: Qed dev pointer.
+ * @buf: Buffer.
+ * @cmd: NVM CFG command id.
+ * @entity_id: Entity id.
*
+ * Return: Int.
*/
int (*read_nvm_cfg)(struct qed_dev *cdev, u8 **buf, u32 cmd,
u32 entity_id);
/**
- * @brief read_nvm_cfg - Read NVM config attribute value.
- * @param cdev
- * @param cmd - NVM CFG command id
+ * read_nvm_cfg_len(): Read NVM config attribute length.
*
- * @return config id length, 0 on error.
+ * @cdev: Qed dev pointer.
+ * @cmd: NVM CFG command id.
+ *
+ * Return: config id length, 0 on error.
*/
int (*read_nvm_cfg_len)(struct qed_dev *cdev, u32 cmd);
/**
- * @brief set_grc_config - Configure value for grc config id.
- * @param cdev
- * @param cfg_id - grc config id
- * @param val - grc config value
+ * set_grc_config(): Configure value for grc config id.
+ *
+ * @cdev: Qed dev pointer.
+ * @cfg_id: grc config id.
+ * @val: grc config value.
*
+ * Return: Int.
*/
int (*set_grc_config)(struct qed_dev *cdev, u32 cfg_id, u32 val);
@@ -1386,7 +1413,7 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
u16 rc = 0;
prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
- STATUS_BLOCK_E4_PROD_INDEX_MASK;
+ STATUS_BLOCK_PROD_INDEX_MASK;
if (sb_info->sb_ack != prod) {
sb_info->sb_ack = prod;
rc |= QED_SB_IDX;
@@ -1397,18 +1424,16 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
}
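A hedged sketch of the usual ISR shape around qed_sb_update_sb_idx() and
qed_sb_ack() (documented below); the handler name and completion
processing are assumptions.

static irqreturn_t example_sb_isr(int irq, void *cookie)
{
	struct qed_sb_info *sb_info = cookie;

	if (qed_sb_update_sb_idx(sb_info) & QED_SB_IDX)
		process_completions(cookie);	/* hypothetical */

	/* Re-enable the line and update the IGU consumer. */
	qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
	return IRQ_HANDLED;
}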
/**
+ * qed_sb_ack(): This function creates an update command for interrupts
+ * that is written to the IGU.
*
- * @brief This function creates an update command for interrupts that is
- * written to the IGU.
- *
- * @param sb_info - This is the structure allocated and
- * initialized per status block. Assumption is
- * that it was initialized using qed_sb_init
- * @param int_cmd - Enable/Disable/Nop
- * @param upd_flg - whether igu consumer should be
- * updated.
+ * @sb_info: This is the structure allocated and
+ * initialized per status block. Assumption is
+ * that it was initialized using qed_sb_init.
+ * @int_cmd: Enable/Disable/Nop.
+ * @upd_flg: Whether igu consumer should be updated.
*
- * @return inline void
+ * Return: Void.
*/
static inline void qed_sb_ack(struct qed_sb_info *sb_info,
enum igu_int_cmd int_cmd,
diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h
index 04180d9af560..494cdc3cd840 100644
--- a/include/linux/qed/qed_iscsi_if.h
+++ b/include/linux/qed/qed_iscsi_if.h
@@ -182,7 +182,7 @@ struct qed_iscsi_cb_ops {
* @param stats - pointer to struct that would be filled
* with stats
* @return 0 on success, error otherwise.
- * @change_mac Change MAC of interface
+ * @change_mac: Change MAC of interface
* @param cdev
* @param handle - the connection handle.
* @param mac - new MAC to configure.
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
index ff808d248883..5b67cd03276e 100644
--- a/include/linux/qed/qed_ll2_if.h
+++ b/include/linux/qed/qed_ll2_if.h
@@ -208,57 +208,57 @@ enum qed_ll2_xmit_flags {
struct qed_ll2_ops {
/**
- * @brief start - initializes ll2
+ * start(): Initializes ll2.
*
- * @param cdev
- * @param params - protocol driver configuration for the ll2.
+ * @cdev: Qed dev pointer.
+ * @params: Protocol driver configuration for the ll2.
*
- * @return 0 on success, otherwise error value.
+ * Return: 0 on success, otherwise error value.
*/
int (*start)(struct qed_dev *cdev, struct qed_ll2_params *params);
/**
- * @brief stop - stops the ll2
+ * stop(): Stops the ll2.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, otherwise error value.
+ * Return: 0 on success, otherwise error value.
*/
int (*stop)(struct qed_dev *cdev);
/**
- * @brief start_xmit - transmits an skb over the ll2 interface
+ * start_xmit(): Transmits an skb over the ll2 interface.
*
- * @param cdev
- * @param skb
- * @param xmit_flags - Transmit options defined by the enum qed_ll2_xmit_flags.
+ * @cdev: Qed dev pointer.
+ * @skb: SKB.
+ * @xmit_flags: Transmit options defined by the enum qed_ll2_xmit_flags.
*
- * @return 0 on success, otherwise error value.
+ * Return: 0 on success, otherwise error value.
*/
int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb,
unsigned long xmit_flags);
/**
- * @brief register_cb_ops - protocol driver register the callback for Rx/Tx
+ * register_cb_ops(): Protocol driver registers the callback for Rx/Tx
* packets. Should be called before `start'.
*
- * @param cdev
- * @param cookie - to be passed to the callback functions.
- * @param ops - the callback functions to register for Rx / Tx.
+ * @cdev: Qed dev pointer.
+ * @cookie: to be passed to the callback functions.
+ * @ops: the callback functions to register for Rx / Tx.
*
- * @return 0 on success, otherwise error value.
+ * Return: 0 on success, otherwise error value.
*/
void (*register_cb_ops)(struct qed_dev *cdev,
const struct qed_ll2_cb_ops *ops,
void *cookie);
/**
- * @brief get LL2 related statistics
+ * get_stats(): Get LL2 related statistics.
*
- * @param cdev
- * @param stats - pointer to struct that would be filled with stats
+ * @cdev: Qed dev pointer.
+ * @stats: Pointer to struct that would be filled with stats.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*get_stats)(struct qed_dev *cdev, struct qed_ll2_stats *stats);
};
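A hedged sketch of the ordering these comments require: callbacks are
registered before start(), and transmits happen only after a successful
start(). cdev, params, skb and the callback table are placeholders.

ops->register_cb_ops(cdev, &my_ll2_cb_ops, my_cookie);
rc = ops->start(cdev, &ll2_params);
if (!rc)
	rc = ops->start_xmit(cdev, skb, 0);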
diff --git a/include/linux/qed/qed_nvmetcp_if.h b/include/linux/qed/qed_nvmetcp_if.h
index 14671bc19ed1..1d51df347560 100644
--- a/include/linux/qed/qed_nvmetcp_if.h
+++ b/include/linux/qed/qed_nvmetcp_if.h
@@ -171,6 +171,23 @@ struct nvmetcp_task_params {
* @param dest_port
* @clear_all_filters: Clear all filters.
* @param cdev
+ * @init_read_io: Init read IO.
+ * @task_params
+ * @cmd_pdu_header
+ * @nvme_cmd
+ * @sgl_task_params
+ * @init_write_io: Init write IO.
+ * @task_params
+ * @cmd_pdu_header
+ * @nvme_cmd
+ * @sgl_task_params
+ * @init_icreq_exchange: Exchange ICReq.
+ * @task_params
+ * @init_conn_req_pdu_hdr
+ * @tx_sgl_task_params
+ * @rx_sgl_task_params
+ * @init_task_cleanup: Init task cleanup.
+ * @task_params
*/
struct qed_nvmetcp_ops {
const struct qed_common_ops *common;
diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h
index aeb242cefebf..3b76c07fbcf8 100644
--- a/include/linux/qed/qed_rdma_if.h
+++ b/include/linux/qed/qed_rdma_if.h
@@ -662,7 +662,8 @@ struct qed_rdma_ops {
u8 connection_handle,
struct qed_ll2_stats *p_stats);
int (*ll2_set_mac_filter)(struct qed_dev *cdev,
- u8 *old_mac_address, u8 *new_mac_address);
+ u8 *old_mac_address,
+ const u8 *new_mac_address);
int (*iwarp_set_engine_affin)(struct qed_dev *cdev, bool b_reset);
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index bab078b25834..6dfed163ab6c 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -27,6 +27,7 @@
#define RDMA_MAX_PDS (64 * 1024)
#define RDMA_MAX_XRC_SRQS (1024)
#define RDMA_MAX_SRQS (32 * 1024)
+#define RDMA_MAX_IRQ_ELEMS_IN_PAGE (128)
#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 64ad900ac742..f7c1d21c2f39 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -9,8 +9,10 @@
#define _LINUX_RADIX_TREE_H
#include <linux/bitops.h>
-#include <linux/kernel.h>
+#include <linux/gfp.h>
#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/math.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 434d12fe2d4f..5e0beb5c5659 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -71,7 +71,8 @@ static inline void __rcu_read_lock(void)
static inline void __rcu_read_unlock(void)
{
preempt_enable();
- rcu_read_unlock_strict();
+ if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
+ rcu_read_unlock_strict();
}
static inline int rcu_preempt_depth(void)
diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h
index 86c8f6c98412..6f9c35817398 100644
--- a/include/linux/rcupdate_trace.h
+++ b/include/linux/rcupdate_trace.h
@@ -31,7 +31,7 @@ static inline int rcu_read_lock_trace_held(void)
#ifdef CONFIG_TASKS_TRACE_RCU
-void rcu_read_unlock_trace_special(struct task_struct *t, int nesting);
+void rcu_read_unlock_trace_special(struct task_struct *t);
/**
* rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
@@ -80,7 +80,8 @@ static inline void rcu_read_unlock_trace(void)
WRITE_ONCE(t->trc_reader_nesting, nesting);
return; // We assume shallow reader nesting.
}
- rcu_read_unlock_trace_special(t, nesting);
+ WARN_ON_ONCE(nesting != 0);
+ rcu_read_unlock_trace_special(t);
}
void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
diff --git a/include/linux/regulator/lp872x.h b/include/linux/regulator/lp872x.h
index d780dbb8b423..b62e45aa1dd3 100644
--- a/include/linux/regulator/lp872x.h
+++ b/include/linux/regulator/lp872x.h
@@ -10,7 +10,7 @@
#include <linux/regulator/machine.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#define LP872X_MAX_REGULATORS 9
@@ -40,11 +40,6 @@ enum lp872x_regulator_id {
LP872X_ID_MAX,
};
-enum lp872x_dvs_state {
- DVS_LOW = GPIOF_OUT_INIT_LOW,
- DVS_HIGH = GPIOF_OUT_INIT_HIGH,
-};
-
enum lp872x_dvs_sel {
SEL_V1,
SEL_V2,
@@ -52,14 +47,14 @@ enum lp872x_dvs_sel {
/**
* lp872x_dvs
- * @gpio : gpio pin number for dvs control
+ * @gpio : gpio descriptor for dvs control
* @vsel : dvs selector for buck v1 or buck v2 register
* @init_state : initial dvs pin state
*/
struct lp872x_dvs {
- int gpio;
+ struct gpio_desc *gpio;
enum lp872x_dvs_sel vsel;
- enum lp872x_dvs_state init_state;
+ enum gpiod_flags init_state;
};
/**
@@ -78,14 +73,14 @@ struct lp872x_regulator_data {
* @update_config : if LP872X_GENERAL_CFG register is updated, set true
* @regulator_data : platform regulator id and init data
* @dvs : dvs data for buck voltage control
- * @enable_gpio : gpio pin number for enable control
+ * @enable_gpio : gpio descriptor for enable control
*/
struct lp872x_platform_data {
u8 general_config;
bool update_config;
struct lp872x_regulator_data regulator_data[LP872X_MAX_REGULATORS];
struct lp872x_dvs *dvs;
- int enable_gpio;
+ struct gpio_desc *enable_gpio;
};
#endif
diff --git a/include/linux/regulator/tps62360.h b/include/linux/regulator/tps62360.h
index 94a90c06f1e5..398e74a1d941 100644
--- a/include/linux/regulator/tps62360.h
+++ b/include/linux/regulator/tps62360.h
@@ -19,10 +19,6 @@
* @en_discharge: Enable discharge the output capacitor via internal
* register.
* @en_internal_pulldn: internal pull down enable or not.
- * @vsel0_gpio: Gpio number for vsel0. It should be -1 if this is tied with
- * fixed logic.
- * @vsel1_gpio: Gpio number for vsel1. It should be -1 if this is tied with
- * fixed logic.
* @vsel0_def_state: Default state of vsel0. 1 if it is high else 0.
* @vsel1_def_state: Default state of vsel1. 1 if it is high else 0.
*/
@@ -30,8 +26,6 @@ struct tps62360_regulator_platform_data {
struct regulator_init_data *reg_init_data;
bool en_discharge;
bool en_internal_pulldn;
- int vsel0_gpio;
- int vsel1_gpio;
int vsel0_def_state;
int vsel1_def_state;
};
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 83c09ac36b13..e0600e1e5c17 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -684,18 +684,6 @@ int rproc_coredump_add_custom_segment(struct rproc *rproc,
void *priv);
int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine);
-static inline struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev)
-{
- return container_of(vdev->dev.parent, struct rproc_vdev, dev);
-}
-
-static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev)
-{
- struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
-
- return rvdev->rproc;
-}
-
void rproc_add_subdev(struct rproc *rproc, struct rproc_subdev *subdev);
void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev);
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index c976cc6de257..e704b1a4c06c 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -235,7 +235,7 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
*
* returns the number of cleaned PTEs.
*/
-int page_mkclean(struct page *);
+int folio_mkclean(struct folio *);
/*
* called in munlock()/munmap() path to check for other vmas holding
@@ -295,12 +295,14 @@ static inline void try_to_unmap(struct page *page, enum ttu_flags flags)
{
}
-static inline int page_mkclean(struct page *page)
+static inline int folio_mkclean(struct folio *folio)
{
return 0;
}
-
-
#endif /* CONFIG_MMU */
+static inline int page_mkclean(struct page *page)
+{
+ return folio_mkclean(page_folio(page));
+}
#endif /* _LINUX_RMAP_H */
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
index d97dcd049f18..02fa9116cd60 100644
--- a/include/linux/rpmsg.h
+++ b/include/linux/rpmsg.h
@@ -186,6 +186,8 @@ int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
__poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait);
+ssize_t rpmsg_get_mtu(struct rpmsg_endpoint *ept);
+
#else
static inline int rpmsg_register_device(struct rpmsg_device *rpdev)
@@ -231,7 +233,7 @@ static inline struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *rpdev
/* This shouldn't be possible */
WARN_ON(1);
- return ERR_PTR(-ENXIO);
+ return NULL;
}
static inline int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len)
@@ -296,6 +298,14 @@ static inline __poll_t rpmsg_poll(struct rpmsg_endpoint *ept,
return 0;
}
+static inline ssize_t rpmsg_get_mtu(struct rpmsg_endpoint *ept)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+
+ return -ENXIO;
+}
+
#endif /* IS_ENABLED(CONFIG_RPMSG) */
/* use a macro to avoid include chaining to get THIS_MODULE */
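A hedged sketch of sizing a payload against the new rpmsg_get_mtu()
before sending; ept, data and len are placeholders.

ssize_t mtu = rpmsg_get_mtu(ept);

if (mtu < 0)
	return mtu;		/* e.g. -ENXIO when CONFIG_RPMSG is off */
if (len > mtu)
	return -EMSGSIZE;
return rpmsg_send(ept, data, len);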
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index bd611e26291d..47fd1c2d3a57 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -66,6 +66,8 @@ struct rtc_class_ops {
int (*alarm_irq_enable)(struct device *, unsigned int enabled);
int (*read_offset)(struct device *, long *offset);
int (*set_offset)(struct device *, long offset);
+ int (*param_get)(struct device *, struct rtc_param *param);
+ int (*param_set)(struct device *, struct rtc_param *param);
};
struct rtc_device;
@@ -80,6 +82,7 @@ struct rtc_timer {
/* flags */
#define RTC_DEV_BUSY 0
+#define RTC_NO_CDEV 1
struct rtc_device {
struct device dev;
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index 7ce9a51ae5c0..2c0ad417ce3c 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -30,31 +30,16 @@ do { \
#ifdef CONFIG_DEBUG_SPINLOCK
extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
-#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
extern int do_raw_read_trylock(rwlock_t *lock);
extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
-#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
extern int do_raw_write_trylock(rwlock_t *lock);
extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
#else
-
-#ifndef arch_read_lock_flags
-# define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#endif
-
-#ifndef arch_write_lock_flags
-# define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#endif
-
# define do_raw_read_lock(rwlock) do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
-# define do_raw_read_lock_flags(lock, flags) \
- do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock)
# define do_raw_read_unlock(rwlock) do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
# define do_raw_write_lock(rwlock) do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0)
-# define do_raw_write_lock_flags(lock, flags) \
- do {__acquire(lock); arch_write_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
# define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock)
# define do_raw_write_unlock(rwlock) do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
#endif
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index abfb53ab11be..f1db6f17c4fb 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -157,8 +157,7 @@ static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
local_irq_save(flags);
preempt_disable();
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock,
- do_raw_read_lock_flags, &flags);
+ LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
return flags;
}
@@ -184,8 +183,7 @@ static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
local_irq_save(flags);
preempt_disable();
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock,
- do_raw_write_lock_flags, &flags);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
return flags;
}
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 352c6127cb90..f9348769e558 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -11,7 +11,6 @@
#include <linux/linkage.h>
#include <linux/types.h>
-#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index 2713e689ad66..fc0357a6e19b 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -9,8 +9,17 @@
#ifndef __LINUX_SCALE_BITMAP_H
#define __LINUX_SCALE_BITMAP_H
-#include <linux/kernel.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/cache.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+#include <linux/minmax.h>
+#include <linux/percpu.h>
#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/wait.h>
struct seq_file;
@@ -427,6 +436,19 @@ void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);
int __sbitmap_queue_get(struct sbitmap_queue *sbq);
/**
+ * __sbitmap_queue_get_batch() - Try to allocate a batch of free bits
+ * @sbq: Bitmap queue to allocate from.
+ * @nr_tags: number of tags requested
+ * @offset: offset to add to returned bits
+ *
+ * Return: Mask of allocated tags, 0 if none are found. Each tag allocated is
+ * a bit in the mask returned, and the caller must add @offset to the value to
+ * get the absolute tag value.
+ */
+unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
+ unsigned int *offset);
+
+/**
* __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
* sbitmap_queue, limiting the depth used from each word, with preemption
* already disabled.
@@ -515,6 +537,17 @@ void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
unsigned int cpu);
+/**
+ * sbitmap_queue_clear_batch() - Free a batch of allocated bits from a
+ * &struct sbitmap_queue.
+ * @sbq: Bitmap to free from.
+ * @offset: offset for each tag in array
+ * @tags: array of tags
+ * @nr_tags: number of tags in array
+ */
+void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
+ int *tags, int nr_tags);
+
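A hedged sketch of the batch contract described above: the return value
is a bit mask, and @offset must be added to each set bit to get the
absolute tag (use_tag() is hypothetical).

unsigned int offset;
unsigned long mask = __sbitmap_queue_get_batch(sbq, nr_tags, &offset);

while (mask) {
	unsigned int bit = __ffs(mask);

	use_tag(offset + bit);
	mask &= mask - 1;	/* clear the lowest set bit */
}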
static inline int sbq_index_inc(int index)
{
return (index + 1) & (SBQ_WAIT_QUEUES - 1);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c1a927ddec64..78c351e35fec 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -503,6 +503,8 @@ struct sched_statistics {
u64 block_start;
u64 block_max;
+ s64 sum_block_runtime;
+
u64 exec_max;
u64 slice_max;
@@ -522,7 +524,7 @@ struct sched_statistics {
u64 nr_wakeups_passive;
u64 nr_wakeups_idle;
#endif
-};
+} ____cacheline_aligned;
struct sched_entity {
/* For load-balancing: */
@@ -538,8 +540,6 @@ struct sched_entity {
u64 nr_migrations;
- struct sched_statistics statistics;
-
#ifdef CONFIG_FAIR_GROUP_SCHED
int depth;
struct sched_entity *parent;
@@ -750,10 +750,6 @@ struct task_struct {
#ifdef CONFIG_SMP
int on_cpu;
struct __call_single_node wake_entry;
-#ifdef CONFIG_THREAD_INFO_IN_TASK
- /* Current CPU: */
- unsigned int cpu;
-#endif
unsigned int wakee_flips;
unsigned long wakee_flip_decay_ts;
struct task_struct *last_wakee;
@@ -775,10 +771,10 @@ struct task_struct {
int normal_prio;
unsigned int rt_priority;
- const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
struct sched_dl_entity dl;
+ const struct sched_class *sched_class;
#ifdef CONFIG_SCHED_CORE
struct rb_node core_node;
@@ -803,6 +799,8 @@ struct task_struct {
struct uclamp_se uclamp[UCLAMP_CNT];
#endif
+ struct sched_statistics stats;
+
#ifdef CONFIG_PREEMPT_NOTIFIERS
/* List of struct preempt_notifier: */
struct hlist_head preempt_notifiers;
@@ -1160,10 +1158,8 @@ struct task_struct {
/* Stacked block device info: */
struct bio_list *bio_list;
-#ifdef CONFIG_BLOCK
/* Stack plugging: */
struct blk_plug *plug;
-#endif
/* VM state: */
struct reclaim_state *reclaim_state;
@@ -1665,6 +1661,7 @@ extern struct pid *cad_pid;
#define PF_VCPU 0x00000001 /* I'm a virtual CPU */
#define PF_IDLE 0x00000002 /* I am an IDLE thread */
#define PF_EXITING 0x00000004 /* Getting shut down */
+#define PF_POSTCOREDUMP 0x00000008 /* Coredumps should ignore this task */
#define PF_IO_WORKER 0x00000010 /* Task is an IO worker */
#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
#define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */
@@ -1886,10 +1883,7 @@ extern struct thread_info init_thread_info;
extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
#ifdef CONFIG_THREAD_INFO_IN_TASK
-static inline struct thread_info *task_thread_info(struct task_struct *task)
-{
- return &task->thread_info;
-}
+# define task_thread_info(task) (&(task)->thread_info)
#elif !defined(__HAVE_THREAD_FUNCTIONS)
# define task_thread_info(task) ((struct thread_info *)(task)->stack)
#endif
@@ -2039,7 +2033,7 @@ static inline int _cond_resched(void) { return 0; }
#endif /* !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) */
#define cond_resched() ({ \
- ___might_sleep(__FILE__, __LINE__, 0); \
+ __might_resched(__FILE__, __LINE__, 0); \
_cond_resched(); \
})
@@ -2047,19 +2041,38 @@ extern int __cond_resched_lock(spinlock_t *lock);
extern int __cond_resched_rwlock_read(rwlock_t *lock);
extern int __cond_resched_rwlock_write(rwlock_t *lock);
-#define cond_resched_lock(lock) ({ \
- ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
- __cond_resched_lock(lock); \
+#define MIGHT_RESCHED_RCU_SHIFT 8
+#define MIGHT_RESCHED_PREEMPT_MASK ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
+
+#ifndef CONFIG_PREEMPT_RT
+/*
+ * Non-RT kernels have an elevated preempt count due to the held lock,
+ * but are not allowed to be inside an RCU read-side critical section.
+ */
+# define PREEMPT_LOCK_RESCHED_OFFSETS PREEMPT_LOCK_OFFSET
+#else
+/*
+ * spin/rw_lock() on RT implies rcu_read_lock(). The might_sleep() check in
+ * cond_resched*lock() has to take that into account because it checks for
+ * preempt_count() and rcu_preempt_depth().
+ */
+# define PREEMPT_LOCK_RESCHED_OFFSETS \
+ (PREEMPT_LOCK_OFFSET + (1U << MIGHT_RESCHED_RCU_SHIFT))
+#endif
+
+#define cond_resched_lock(lock) ({ \
+ __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
+ __cond_resched_lock(lock); \
})
-#define cond_resched_rwlock_read(lock) ({ \
- __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
- __cond_resched_rwlock_read(lock); \
+#define cond_resched_rwlock_read(lock) ({ \
+ __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
+ __cond_resched_rwlock_read(lock); \
})
-#define cond_resched_rwlock_write(lock) ({ \
- __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
- __cond_resched_rwlock_write(lock); \
+#define cond_resched_rwlock_write(lock) ({ \
+ __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
+ __cond_resched_rwlock_write(lock); \
})
static inline void cond_resched_rcu(void)
@@ -2114,11 +2127,7 @@ static __always_inline bool need_resched(void)
static inline unsigned int task_cpu(const struct task_struct *p)
{
-#ifdef CONFIG_THREAD_INFO_IN_TASK
- return READ_ONCE(p->cpu);
-#else
return READ_ONCE(task_thread_info(p)->cpu);
-#endif
}
extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
@@ -2137,6 +2146,7 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
extern bool sched_task_on_rq(struct task_struct *p);
+extern unsigned long get_wchan(struct task_struct *p);
/*
* In order to reduce various lock holder preemption latencies provide an
diff --git a/include/linux/sched/idle.h b/include/linux/sched/idle.h
index 22873d276be6..d73d314d59c6 100644
--- a/include/linux/sched/idle.h
+++ b/include/linux/sched/idle.h
@@ -11,7 +11,11 @@ enum cpu_idle_type {
CPU_MAX_IDLE_TYPES
};
+#ifdef CONFIG_SMP
extern void wake_up_if_idle(int cpu);
+#else
+static inline void wake_up_if_idle(int cpu) { }
+#endif
/*
* Idle thread specific functions to determine the need_resched
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 5561486fddef..aca874d33fe6 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -49,6 +49,35 @@ static inline void mmdrop(struct mm_struct *mm)
__mmdrop(mm);
}
+#ifdef CONFIG_PREEMPT_RT
+/*
+ * RCU callback for delayed mm drop. Not strictly RCU, but call_rcu() is
+ * by far the least expensive way to do that.
+ */
+static inline void __mmdrop_delayed(struct rcu_head *rhp)
+{
+ struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
+
+ __mmdrop(mm);
+}
+
+/*
+ * Invoked from finish_task_switch(). Delegates the heavy lifting on RT
+ * kernels via RCU.
+ */
+static inline void mmdrop_sched(struct mm_struct *mm)
+{
+ /* Provides a full memory barrier. See mmdrop() */
+ if (atomic_dec_and_test(&mm->mm_count))
+ call_rcu(&mm->delayed_drop, __mmdrop_delayed);
+}
+#else
+static inline void mmdrop_sched(struct mm_struct *mm)
+{
+ mmdrop(mm);
+}
+#endif
+
/**
* mmget() - Pin the address space associated with a &struct mm_struct.
* @mm: The address space to pin.
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index e5f4ce622ee6..23505394ef70 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -72,6 +72,17 @@ struct multiprocess_signals {
struct hlist_node node;
};
+struct core_thread {
+ struct task_struct *task;
+ struct core_thread *next;
+};
+
+struct core_state {
+ atomic_t nr_threads;
+ struct core_thread dumper;
+ struct completion startup;
+};
+
/*
* NOTE! "signal_struct" does not have its own
* locking, because a shared signal_struct always
@@ -110,6 +121,8 @@ struct signal_struct {
int group_stop_count;
unsigned int flags; /* see SIGNAL_* flags below */
+ struct core_state *core_state; /* coredumping support */
+
/*
* PR_SET_CHILD_SUBREAPER marks a process, like a service
* manager, to re-parent orphan (double-forking) child processes
@@ -338,6 +351,7 @@ extern int kill_pid(struct pid *pid, int sig, int priv);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int);
+extern void force_fatal_sig(int);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index ef02be869cf2..ba88a6987400 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -54,7 +54,8 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
-extern void sched_post_fork(struct task_struct *p);
+extern void sched_post_fork(struct task_struct *p,
+ struct kernel_clone_args *kargs);
extern void sched_dead(struct task_struct *p);
void __noreturn do_task_dead(void);
diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
index 2413427e439c..d10150587d81 100644
--- a/include/linux/sched/task_stack.h
+++ b/include/linux/sched/task_stack.h
@@ -25,7 +25,11 @@ static inline void *task_stack_page(const struct task_struct *task)
static inline unsigned long *end_of_stack(const struct task_struct *task)
{
+#ifdef CONFIG_STACK_GROWSUP
+ return (unsigned long *)((unsigned long)task->stack + THREAD_SIZE) - 1;
+#else
return task->stack;
+#endif
}
#elif !defined(__HAVE_THREAD_FUNCTIONS)
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 8f0f778b7c91..c07bfa2d80f2 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -42,6 +42,13 @@ static inline int cpu_smt_flags(void)
}
#endif
+#ifdef CONFIG_SCHED_CLUSTER
+static inline int cpu_cluster_flags(void)
+{
+ return SD_SHARE_PKG_RESOURCES;
+}
+#endif
+
#ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void)
{
@@ -98,7 +105,7 @@ struct sched_domain {
/* idle_balance() stats */
u64 max_newidle_lb_cost;
- unsigned long next_decay_max_lb_cost;
+ unsigned long last_decay_max_lb_cost;
u64 avg_scan_cost; /* select_idle_sibling */
diff --git a/include/linux/secretmem.h b/include/linux/secretmem.h
index 21c3771e6a56..988528b5da43 100644
--- a/include/linux/secretmem.h
+++ b/include/linux/secretmem.h
@@ -23,7 +23,7 @@ static inline bool page_is_secretmem(struct page *page)
mapping = (struct address_space *)
((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
- if (mapping != page->mapping)
+ if (!mapping || mapping != page->mapping)
return false;
return mapping->a_ops == &secretmem_aops;
diff --git a/include/linux/security.h b/include/linux/security.h
index 5b7288521300..bbf44a466832 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -179,7 +179,7 @@ struct xfrm_policy;
struct xfrm_state;
struct xfrm_user_sec_ctx;
struct seq_file;
-struct sctp_endpoint;
+struct sctp_association;
#ifdef CONFIG_MMU
extern unsigned long mmap_min_addr;
@@ -258,13 +258,13 @@ extern int security_init(void);
extern int early_security_init(void);
/* Security operations */
-int security_binder_set_context_mgr(struct task_struct *mgr);
-int security_binder_transaction(struct task_struct *from,
- struct task_struct *to);
-int security_binder_transfer_binder(struct task_struct *from,
- struct task_struct *to);
-int security_binder_transfer_file(struct task_struct *from,
- struct task_struct *to, struct file *file);
+int security_binder_set_context_mgr(const struct cred *mgr);
+int security_binder_transaction(const struct cred *from,
+ const struct cred *to);
+int security_binder_transfer_binder(const struct cred *from,
+ const struct cred *to);
+int security_binder_transfer_file(const struct cred *from,
+ const struct cred *to, struct file *file);
int security_ptrace_access_check(struct task_struct *child, unsigned int mode);
int security_ptrace_traceme(struct task_struct *parent);
int security_capget(struct task_struct *target,
@@ -317,8 +317,9 @@ int security_add_mnt_opt(const char *option, const char *val,
int len, void **mnt_opts);
int security_move_mount(const struct path *from_path, const struct path *to_path);
int security_dentry_init_security(struct dentry *dentry, int mode,
- const struct qstr *name, void **ctx,
- u32 *ctxlen);
+ const struct qstr *name,
+ const char **xattr_name, void **ctx,
+ u32 *ctxlen);
int security_dentry_create_files_as(struct dentry *dentry, int mode,
struct qstr *name,
const struct cred *old,
@@ -508,25 +509,25 @@ static inline int early_security_init(void)
return 0;
}
-static inline int security_binder_set_context_mgr(struct task_struct *mgr)
+static inline int security_binder_set_context_mgr(const struct cred *mgr)
{
return 0;
}
-static inline int security_binder_transaction(struct task_struct *from,
- struct task_struct *to)
+static inline int security_binder_transaction(const struct cred *from,
+ const struct cred *to)
{
return 0;
}
-static inline int security_binder_transfer_binder(struct task_struct *from,
- struct task_struct *to)
+static inline int security_binder_transfer_binder(const struct cred *from,
+ const struct cred *to)
{
return 0;
}
-static inline int security_binder_transfer_file(struct task_struct *from,
- struct task_struct *to,
+static inline int security_binder_transfer_file(const struct cred *from,
+ const struct cred *to,
struct file *file)
{
return 0;
@@ -739,6 +740,7 @@ static inline void security_inode_free(struct inode *inode)
static inline int security_dentry_init_security(struct dentry *dentry,
int mode,
const struct qstr *name,
+ const char **xattr_name,
void **ctx,
u32 *ctxlen)
{
@@ -1041,6 +1043,11 @@ static inline void security_transfer_creds(struct cred *new,
{
}
+static inline void security_cred_getsecid(const struct cred *c, u32 *secid)
+{
+ *secid = 0;
+}
+
static inline int security_kernel_act_as(struct cred *cred, u32 secid)
{
return 0;
@@ -1418,10 +1425,10 @@ int security_tun_dev_create(void);
int security_tun_dev_attach_queue(void *security);
int security_tun_dev_attach(struct sock *sk, void *security);
int security_tun_dev_open(void *security);
-int security_sctp_assoc_request(struct sctp_endpoint *ep, struct sk_buff *skb);
+int security_sctp_assoc_request(struct sctp_association *asoc, struct sk_buff *skb);
int security_sctp_bind_connect(struct sock *sk, int optname,
struct sockaddr *address, int addrlen);
-void security_sctp_sk_clone(struct sctp_endpoint *ep, struct sock *sk,
+void security_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk,
struct sock *newsk);
#else /* CONFIG_SECURITY_NETWORK */
@@ -1624,7 +1631,7 @@ static inline int security_tun_dev_open(void *security)
return 0;
}
-static inline int security_sctp_assoc_request(struct sctp_endpoint *ep,
+static inline int security_sctp_assoc_request(struct sctp_association *asoc,
struct sk_buff *skb)
{
return 0;
@@ -1637,7 +1644,7 @@ static inline int security_sctp_bind_connect(struct sock *sk, int optname,
return 0;
}
-static inline void security_sctp_sk_clone(struct sctp_endpoint *ep,
+static inline void security_sctp_sk_clone(struct sctp_association *asoc,
struct sock *sk,
struct sock *newsk)
{
@@ -2038,4 +2045,20 @@ static inline int security_perf_event_write(struct perf_event *event)
#endif /* CONFIG_SECURITY */
#endif /* CONFIG_PERF_EVENTS */
+#ifdef CONFIG_IO_URING
+#ifdef CONFIG_SECURITY
+extern int security_uring_override_creds(const struct cred *new);
+extern int security_uring_sqpoll(void);
+#else
+static inline int security_uring_override_creds(const struct cred *new)
+{
+ return 0;
+}
+static inline int security_uring_sqpoll(void)
+{
+ return 0;
+}
+#endif /* CONFIG_SECURITY */
+#endif /* CONFIG_IO_URING */
+
#endif /* ! __LINUX_SECURITY_H */
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index dd99569595fd..72dbb44a4573 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -4,6 +4,7 @@
#include <linux/types.h>
#include <linux/string.h>
+#include <linux/string_helpers.h>
#include <linux/bug.h>
#include <linux/mutex.h>
#include <linux/cpumask.h>
@@ -135,7 +136,21 @@ static inline void seq_escape_str(struct seq_file *m, const char *src,
seq_escape_mem(m, src, strlen(src), flags, esc);
}
-void seq_escape(struct seq_file *m, const char *s, const char *esc);
+/**
+ * seq_escape - print string into buffer, escaping some characters
+ * @m: target buffer
+ * @s: NULL-terminated string
+ * @esc: set of characters that need escaping
+ *
+ * Puts the string into the buffer, replacing each occurrence of a
+ * character from @esc with its usual octal escape.
+ *
+ * Use seq_has_overflowed() to check for errors.
+ */
+static inline void seq_escape(struct seq_file *m, const char *s, const char *esc)
+{
+ seq_escape_str(m, s, ESCAPE_OCTAL, esc);
+}
void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type,
int rowsize, int groupsize, const void *buf, size_t len,
@@ -194,7 +209,7 @@ static const struct file_operations __name ## _fops = { \
#define DEFINE_PROC_SHOW_ATTRIBUTE(__name) \
static int __name ## _open(struct inode *inode, struct file *file) \
{ \
- return single_open(file, __name ## _show, inode->i_private); \
+ return single_open(file, __name ## _show, PDE_DATA(inode)); \
} \
\
static const struct proc_ops __name ## _proc_ops = { \
diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h
deleted file mode 100644
index 3cca2b8fac43..000000000000
--- a/include/linux/seqno-fence.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * seqno-fence, using a dma-buf to synchronize fencing
- *
- * Copyright (C) 2012 Texas Instruments
- * Copyright (C) 2012 Canonical Ltd
- * Authors:
- * Rob Clark <robdclark@gmail.com>
- * Maarten Lankhorst <maarten.lankhorst@canonical.com>
- */
-
-#ifndef __LINUX_SEQNO_FENCE_H
-#define __LINUX_SEQNO_FENCE_H
-
-#include <linux/dma-fence.h>
-#include <linux/dma-buf.h>
-
-enum seqno_fence_condition {
- SEQNO_FENCE_WAIT_GEQUAL,
- SEQNO_FENCE_WAIT_NONZERO
-};
-
-struct seqno_fence {
- struct dma_fence base;
-
- const struct dma_fence_ops *ops;
- struct dma_buf *sync_buf;
- uint32_t seqno_ofs;
- enum seqno_fence_condition condition;
-};
-
-extern const struct dma_fence_ops seqno_fence_ops;
-
-/**
- * to_seqno_fence - cast a fence to a seqno_fence
- * @fence: fence to cast to a seqno_fence
- *
- * Returns NULL if the fence is not a seqno_fence,
- * or the seqno_fence otherwise.
- */
-static inline struct seqno_fence *
-to_seqno_fence(struct dma_fence *fence)
-{
- if (fence->ops != &seqno_fence_ops)
- return NULL;
- return container_of(fence, struct seqno_fence, base);
-}
-
-/**
- * seqno_fence_init - initialize a seqno fence
- * @fence: seqno_fence to initialize
- * @lock: pointer to spinlock to use for fence
- * @sync_buf: buffer containing the memory location to signal on
- * @context: the execution context this fence is a part of
- * @seqno_ofs: the offset within @sync_buf
- * @seqno: the sequence # to signal on
- * @cond: fence wait condition
- * @ops: the fence_ops for operations on this seqno fence
- *
- * This function initializes a struct seqno_fence with passed parameters,
- * and takes a reference on sync_buf which is released on fence destruction.
- *
- * A seqno_fence is a dma_fence which can complete in software when
- * enable_signaling is called, but it also completes when
- * (s32)((sync_buf)[seqno_ofs] - seqno) >= 0 is true
- *
- * The seqno_fence will take a refcount on the sync_buf until it's
- * destroyed, but actual lifetime of sync_buf may be longer if one of the
- * callers take a reference to it.
- *
- * Certain hardware have instructions to insert this type of wait condition
- * in the command stream, so no intervention from software would be needed.
- * This type of fence can be destroyed before completed, however a reference
- * on the sync_buf dma-buf can be taken. It is encouraged to re-use the same
- * dma-buf for sync_buf, since mapping or unmapping the sync_buf to the
- * device's vm can be expensive.
- *
- * It is recommended for creators of seqno_fence to call dma_fence_signal()
- * before destruction. This will prevent possible issues from wraparound at
- * time of issue vs time of check, since users can check dma_fence_is_signaled()
- * before submitting instructions for the hardware to wait on the fence.
- * However, when ops.enable_signaling is not called, it doesn't have to be
- * done as soon as possible, just before there's any real danger of seqno
- * wraparound.
- */
-static inline void
-seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock,
- struct dma_buf *sync_buf, uint32_t context,
- uint32_t seqno_ofs, uint32_t seqno,
- enum seqno_fence_condition cond,
- const struct dma_fence_ops *ops)
-{
- BUG_ON(!fence || !sync_buf || !ops);
- BUG_ON(!ops->wait || !ops->enable_signaling ||
- !ops->get_driver_name || !ops->get_timeline_name);
-
- /*
- * ops is used in dma_fence_init for get_driver_name, so needs to be
- * initialized first
- */
- fence->ops = ops;
- dma_fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno);
- get_dma_buf(sync_buf);
- fence->sync_buf = sync_buf;
- fence->seqno_ofs = seqno_ofs;
- fence->condition = cond;
-}
-
-#endif /* __LINUX_SEQNO_FENCE_H */
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 9814fff58a69..76fbf92b04d9 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -93,4 +93,5 @@ extern void register_shrinker_prepared(struct shrinker *shrinker);
extern int register_shrinker(struct shrinker *shrinker);
extern void unregister_shrinker(struct shrinker *shrinker);
extern void free_prealloced_shrinker(struct shrinker *shrinker);
+extern void synchronize_shrinkers(void);
#endif
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 3f96a6374e4f..a6db6f2ae113 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -126,7 +126,6 @@ static inline int sigequalsets(const sigset_t *set1, const sigset_t *set2)
#define sigmask(sig) (1UL << ((sig) - 1))
#ifndef __HAVE_ARCH_SIG_SETOPS
-#include <linux/string.h>
#define _SIG_SET_BINOP(name, op) \
static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \
@@ -464,6 +463,12 @@ int __save_altstack(stack_t __user *, unsigned long);
unsafe_put_user(t->sas_ss_size, &__uss->ss_size, label); \
} while (0);
+#ifdef CONFIG_DYNAMIC_SIGFRAME
+bool sigaltstack_size_valid(size_t ss_size);
+#else
+static inline bool sigaltstack_size_valid(size_t size) { return true; }
+#endif /* CONFIG_DYNAMIC_SIGFRAME */
+
#ifdef CONFIG_PROC_FS
struct seq_file;
extern void render_sigset_t(struct seq_file *, const char *, sigset_t *);
diff --git a/include/linux/signal_types.h b/include/linux/signal_types.h
index 34cb28b8f16c..a70b2bdbf4d9 100644
--- a/include/linux/signal_types.h
+++ b/include/linux/signal_types.h
@@ -70,6 +70,9 @@ struct ksignal {
int sig;
};
+/* Used to kill the race between sigaction and forced signals */
+#define SA_IMMUTABLE 0x00800000
+
#ifndef __ARCH_UAPI_SA_FLAGS
#ifdef SA_RESTORER
#define __ARCH_UAPI_SA_FLAGS SA_RESTORER
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 841e2f0f5240..686a666d073d 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -454,9 +454,15 @@ enum {
* all frags to avoid possible bad checksum
*/
SKBFL_SHARED_FRAG = BIT(1),
+
+ /* segment contains only zerocopy data and should not be
+ * charged to kernel memory.
+ */
+ SKBFL_PURE_ZEROCOPY = BIT(2),
};
#define SKBFL_ZEROCOPY_FRAG (SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG)
+#define SKBFL_ALL_ZEROCOPY (SKBFL_ZEROCOPY_FRAG | SKBFL_PURE_ZEROCOPY)
/*
* The callback notifies userspace to release buffers when skb DMA is done in
@@ -652,6 +658,7 @@ typedef unsigned char *sk_buff_data_t;
* @tc_at_ingress: used within tc_classify to distinguish in/egress
* @redirected: packet was redirected by packet classifier
* @from_ingress: packet was redirected from the ingress path
+ * @nf_skip_egress: packet shall skip nf egress - see netfilter_netdev.h
* @peeked: this packet has been seen already, so stats have been
* done for it, don't do them again
* @nf_trace: netfilter packet trace flag
@@ -868,6 +875,9 @@ struct sk_buff {
#ifdef CONFIG_NET_REDIRECT
__u8 from_ingress:1;
#endif
+#ifdef CONFIG_NETFILTER_SKIP_EGRESS
+ __u8 nf_skip_egress:1;
+#endif
#ifdef CONFIG_TLS_DEVICE
__u8 decrypted:1;
#endif
@@ -1460,6 +1470,17 @@ static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
return is_zcopy ? skb_uarg(skb) : NULL;
}
+static inline bool skb_zcopy_pure(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->flags & SKBFL_PURE_ZEROCOPY;
+}
+
+static inline bool skb_pure_zcopy_same(const struct sk_buff *skb1,
+ const struct sk_buff *skb2)
+{
+ return skb_zcopy_pure(skb1) == skb_zcopy_pure(skb2);
+}
+
static inline void net_zcopy_get(struct ubuf_info *uarg)
{
refcount_inc(&uarg->refcnt);
@@ -1524,7 +1545,7 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success)
if (!skb_zcopy_is_nouarg(skb))
uarg->callback(skb, uarg, zerocopy_success);
- skb_shinfo(skb)->flags &= ~SKBFL_ZEROCOPY_FRAG;
+ skb_shinfo(skb)->flags &= ~SKBFL_ALL_ZEROCOPY;
}
}
@@ -1671,6 +1692,22 @@ static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
return 0;
}
+/* This variant of skb_unclone() makes sure skb->truesize is not changed */
+static inline int skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
+{
+ might_sleep_if(gfpflags_allow_blocking(pri));
+
+ if (skb_cloned(skb)) {
+ unsigned int save = skb->truesize;
+ int res;
+
+ res = pskb_expand_head(skb, 0, 0, pri);
+ skb->truesize = save;
+ return res;
+ }
+ return 0;
+}
+
/**
* skb_header_cloned - is the header a clone
* @skb: buffer to check
@@ -4240,6 +4277,9 @@ enum skb_ext_id {
#if IS_ENABLED(CONFIG_MPTCP)
SKB_EXT_MPTCP,
#endif
+#if IS_ENABLED(CONFIG_MCTP_FLOWS)
+ SKB_EXT_MCTP,
+#endif
SKB_EXT_NUM, /* must be last */
};
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 14ab0c0bc924..584d94be9c8b 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -128,6 +128,7 @@ int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
struct sk_msg *msg, u32 bytes);
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
int len, int flags);
+bool sk_msg_is_readable(struct sock *sk);
static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
{
@@ -506,10 +507,36 @@ static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
return !!psock->saved_data_ready;
}
+static inline bool sk_is_tcp(const struct sock *sk)
+{
+ return sk->sk_type == SOCK_STREAM &&
+ sk->sk_protocol == IPPROTO_TCP;
+}
+
+static inline bool sk_is_udp(const struct sock *sk)
+{
+ return sk->sk_type == SOCK_DGRAM &&
+ sk->sk_protocol == IPPROTO_UDP;
+}
+
#if IS_ENABLED(CONFIG_NET_SOCK_MSG)
-/* We only have one bit so far. */
-#define BPF_F_PTR_MASK ~(BPF_F_INGRESS)
+#define BPF_F_STRPARSER (1UL << 1)
+
+/* We only have two bits so far. */
+#define BPF_F_PTR_MASK ~(BPF_F_INGRESS | BPF_F_STRPARSER)
+
+static inline bool skb_bpf_strparser(const struct sk_buff *skb)
+{
+ unsigned long sk_redir = skb->_sk_redir;
+
+ return sk_redir & BPF_F_STRPARSER;
+}
+
+static inline void skb_bpf_set_strparser(struct sk_buff *skb)
+{
+ skb->_sk_redir |= BPF_F_STRPARSER;
+}
static inline bool skb_bpf_ingress(const struct sk_buff *skb)
{
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 083f3ce550bc..181045148b06 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -142,8 +142,6 @@ struct mem_cgroup;
void __init kmem_cache_init(void);
bool slab_is_available(void);
-extern bool usercopy_fallback;
-
struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
unsigned int align, slab_flags_t flags,
void (*ctor)(void *));
@@ -152,8 +150,8 @@ struct kmem_cache *kmem_cache_create_usercopy(const char *name,
slab_flags_t flags,
unsigned int useroffset, unsigned int usersize,
void (*ctor)(void *));
-void kmem_cache_destroy(struct kmem_cache *);
-int kmem_cache_shrink(struct kmem_cache *);
+void kmem_cache_destroy(struct kmem_cache *s);
+int kmem_cache_shrink(struct kmem_cache *s);
/*
* Please use this macro to create slab caches. Simply specify the
@@ -181,11 +179,11 @@ int kmem_cache_shrink(struct kmem_cache *);
/*
* Common kmalloc functions provided by all allocators
*/
-void * __must_check krealloc(const void *, size_t, gfp_t);
-void kfree(const void *);
-void kfree_sensitive(const void *);
-size_t __ksize(const void *);
-size_t ksize(const void *);
+void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __alloc_size(2);
+void kfree(const void *objp);
+void kfree_sensitive(const void *objp);
+size_t __ksize(const void *objp);
+size_t ksize(const void *objp);
#ifdef CONFIG_PRINTK
bool kmem_valid_obj(void *object);
void kmem_dump_obj(void *object);
@@ -425,9 +423,9 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
#define kmalloc_index(s) __kmalloc_index(s, true)
#endif /* !CONFIG_SLOB */
-void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
-void kmem_cache_free(struct kmem_cache *, void *);
+void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
+void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc;
+void kmem_cache_free(struct kmem_cache *s, void *objp);
/*
* Bulk allocation and freeing operations. These are accelerated in an
@@ -436,8 +434,8 @@ void kmem_cache_free(struct kmem_cache *, void *);
*
* Note that interrupts must be enabled when calling these functions.
*/
-void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
-int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
+int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
/*
* Caller must not use kfree_bulk() on memory not originally allocated
@@ -449,10 +447,12 @@ static __always_inline void kfree_bulk(size_t size, void **p)
}
#ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
+ __alloc_size(1);
+void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
+ __malloc;
#else
-static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline __alloc_size(1) void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
return __kmalloc(size, flags);
}
@@ -464,25 +464,24 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f
#endif
#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;
+extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
+ __assume_slab_alignment __alloc_size(3);
#ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size) __assume_slab_alignment __malloc;
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
+ int node, size_t size) __assume_slab_alignment
+ __alloc_size(4);
#else
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size)
+static __always_inline __alloc_size(4) void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
+ gfp_t gfpflags, int node, size_t size)
{
return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */
#else /* CONFIG_TRACING */
-static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
- gfp_t flags, size_t size)
+static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_cache *s,
+ gfp_t flags, size_t size)
{
void *ret = kmem_cache_alloc(s, flags);
@@ -490,10 +489,8 @@ static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
return ret;
}
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size)
+static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
+ int node, size_t size)
{
void *ret = kmem_cache_alloc_node(s, gfpflags, node);
@@ -502,19 +499,21 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
}
#endif /* CONFIG_TRACING */
-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
+extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment
+ __alloc_size(1);
#ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+ __assume_page_alignment __alloc_size(1);
#else
-static __always_inline void *
-kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+static __always_inline __alloc_size(1) void *kmalloc_order_trace(size_t size, gfp_t flags,
+ unsigned int order)
{
return kmalloc_order(size, flags, order);
}
#endif
-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+static __always_inline __alloc_size(1) void *kmalloc_large(size_t size, gfp_t flags)
{
unsigned int order = get_order(size);
return kmalloc_order_trace(size, flags, order);
@@ -574,7 +573,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
* Try really hard to succeed the allocation but fail
* eventually.
*/
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
+static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
@@ -596,7 +595,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
return __kmalloc(size, flags);
}
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
if (__builtin_constant_p(size) &&
@@ -620,7 +619,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
* @size: element size.
* @flags: the type of memory to allocate (see kmalloc).
*/
-static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
+static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
size_t bytes;
@@ -638,8 +637,10 @@ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
* @new_size: new size of a single member of the array
* @flags: the type of memory to allocate (see kmalloc)
*/
-static __must_check inline void *
-krealloc_array(void *p, size_t new_n, size_t new_size, gfp_t flags)
+static inline __alloc_size(2, 3) void * __must_check krealloc_array(void *p,
+ size_t new_n,
+ size_t new_size,
+ gfp_t flags)
{
size_t bytes;
@@ -655,7 +656,7 @@ krealloc_array(void *p, size_t new_n, size_t new_size, gfp_t flags)
* @size: element size.
* @flags: the type of memory to allocate (see kmalloc).
*/
-static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
+static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags)
{
return kmalloc_array(n, size, flags | __GFP_ZERO);
}
@@ -668,12 +669,13 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
* allocator where we care about the real place the memory allocation
* request comes from.
*/
-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
+extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
+ __alloc_size(1);
#define kmalloc_track_caller(size, flags) \
__kmalloc_track_caller(size, flags, _RET_IP_)
-static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
- int node)
+static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
+ int node)
{
size_t bytes;
@@ -684,14 +686,15 @@ static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
return __kmalloc_node(bytes, flags, node);
}
-static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
+static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}
#ifdef CONFIG_NUMA
-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
+extern void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
+ unsigned long caller) __alloc_size(1);
#define kmalloc_node_track_caller(size, flags, node) \
__kmalloc_node_track_caller(size, flags, node, \
_RET_IP_)
@@ -716,7 +719,7 @@ static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
* @size: how many bytes of memory are required.
* @flags: the type of memory to allocate (see kmalloc).
*/
-static inline void *kzalloc(size_t size, gfp_t flags)
+static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags)
{
return kmalloc(size, flags | __GFP_ZERO);
}
@@ -727,11 +730,45 @@ static inline void *kzalloc(size_t size, gfp_t flags)
* @flags: the type of memory to allocate (see kmalloc).
* @node: memory node from which to allocate
*/
-static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
+static inline __alloc_size(1) void *kzalloc_node(size_t size, gfp_t flags, int node)
{
return kmalloc_node(size, flags | __GFP_ZERO, node);
}
+extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
+static inline __alloc_size(1) void *kvmalloc(size_t size, gfp_t flags)
+{
+ return kvmalloc_node(size, flags, NUMA_NO_NODE);
+}
+static inline __alloc_size(1) void *kvzalloc_node(size_t size, gfp_t flags, int node)
+{
+ return kvmalloc_node(size, flags | __GFP_ZERO, node);
+}
+static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags)
+{
+ return kvmalloc(size, flags | __GFP_ZERO);
+}
+
+static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
+{
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(n, size, &bytes)))
+ return NULL;
+
+ return kvmalloc(bytes, flags);
+}
+
+static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags)
+{
+ return kvmalloc_array(n, size, flags | __GFP_ZERO);
+}
+
+extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
+ __alloc_size(3);
+extern void kvfree(const void *addr);
+extern void kvfree_sensitive(const void *addr, size_t len);
+
unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 85499f0586b0..0fa751b946fa 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -99,6 +99,8 @@ struct kmem_cache {
#ifdef CONFIG_SLUB_CPU_PARTIAL
/* Number of per cpu partial objects to keep around */
unsigned int cpu_partial;
+ /* Number of per cpu partial pages to keep around */
+ unsigned int cpu_partial_pages;
#endif
struct kmem_cache_order_objects oo;
@@ -141,17 +143,6 @@ struct kmem_cache {
struct kmem_cache_node *node[MAX_NUMNODES];
};
-#ifdef CONFIG_SLUB_CPU_PARTIAL
-#define slub_cpu_partial(s) ((s)->cpu_partial)
-#define slub_set_cpu_partial(s, n) \
-({ \
- slub_cpu_partial(s) = (n); \
-})
-#else
-#define slub_cpu_partial(s) (0)
-#define slub_set_cpu_partial(s, n)
-#endif /* CONFIG_SLUB_CPU_PARTIAL */
-
#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *);
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 510519e8a1eb..a80ab58ae3f1 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -108,7 +108,6 @@ static inline void on_each_cpu_cond(smp_cond_func_t cond_func,
#ifdef CONFIG_SMP
#include <linux/preempt.h>
-#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>
diff --git a/include/linux/soc/marvell/octeontx2/asm.h b/include/linux/soc/marvell/octeontx2/asm.h
index fa1d6af0164e..d683251a0b40 100644
--- a/include/linux/soc/marvell/octeontx2/asm.h
+++ b/include/linux/soc/marvell/octeontx2/asm.h
@@ -5,6 +5,7 @@
#ifndef __SOC_OTX2_ASM_H
#define __SOC_OTX2_ASM_H
+#include <linux/types.h>
#if defined(CONFIG_ARM64)
/*
* otx2_lmt_flush is used for LMT store operation.
@@ -34,9 +35,23 @@
: [rf] "+r"(val) \
: [rs] "r"(addr)); \
})
+
+static inline u64 otx2_atomic64_fetch_add(u64 incr, u64 *ptr)
+{
+ u64 result;
+
+ asm volatile (".cpu generic+lse\n"
+ "ldadda %x[i], %x[r], [%[b]]"
+ : [r] "=r" (result), "+m" (*ptr)
+ : [i] "r" (incr), [b] "r" (ptr)
+ : "memory");
+ return result;
+}
+
#else
#define otx2_lmt_flush(ioaddr) ({ 0; })
#define cn10k_lmt_flush(val, addr) ({ addr = val; })
+#define otx2_atomic64_fetch_add(incr, ptr) ({ incr; })
#endif
#endif /* __SOC_OTX2_ASM_H */
diff --git a/include/linux/soc/mediatek/mtk-mmsys.h b/include/linux/soc/mediatek/mtk-mmsys.h
index 2228bf6133da..4bba275e235a 100644
--- a/include/linux/soc/mediatek/mtk-mmsys.h
+++ b/include/linux/soc/mediatek/mtk-mmsys.h
@@ -29,13 +29,16 @@ enum mtk_ddp_comp_id {
DDP_COMPONENT_OVL0,
DDP_COMPONENT_OVL_2L0,
DDP_COMPONENT_OVL_2L1,
+ DDP_COMPONENT_OVL_2L2,
DDP_COMPONENT_OVL1,
+ DDP_COMPONENT_POSTMASK0,
DDP_COMPONENT_PWM0,
DDP_COMPONENT_PWM1,
DDP_COMPONENT_PWM2,
DDP_COMPONENT_RDMA0,
DDP_COMPONENT_RDMA1,
DDP_COMPONENT_RDMA2,
+ DDP_COMPONENT_RDMA4,
DDP_COMPONENT_UFOE,
DDP_COMPONENT_WDMA0,
DDP_COMPONENT_WDMA1,
diff --git a/include/linux/soc/qcom/qcom_aoss.h b/include/linux/soc/qcom/qcom_aoss.h
new file mode 100644
index 000000000000..3c2a82e606f8
--- /dev/null
+++ b/include/linux/soc/qcom/qcom_aoss.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __QCOM_AOSS_H__
+#define __QCOM_AOSS_H__
+
+#include <linux/err.h>
+#include <linux/device.h>
+
+struct qmp;
+
+#if IS_ENABLED(CONFIG_QCOM_AOSS_QMP)
+
+int qmp_send(struct qmp *qmp, const void *data, size_t len);
+struct qmp *qmp_get(struct device *dev);
+void qmp_put(struct qmp *qmp);
+
+#else
+
+static inline int qmp_send(struct qmp *qmp, const void *data, size_t len)
+{
+ return -ENODEV;
+}
+
+static inline struct qmp *qmp_get(struct device *dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void qmp_put(struct qmp *qmp)
+{
+}
+
+#endif
+
+#endif
diff --git a/include/linux/soc/qcom/smd-rpm.h b/include/linux/soc/qcom/smd-rpm.h
index 60e66fc9b6bf..860dd8cdf9f3 100644
--- a/include/linux/soc/qcom/smd-rpm.h
+++ b/include/linux/soc/qcom/smd-rpm.h
@@ -38,6 +38,8 @@ struct qcom_smd_rpm;
#define QCOM_SMD_RPM_IPA_CLK 0x617069
#define QCOM_SMD_RPM_CE_CLK 0x6563
#define QCOM_SMD_RPM_AGGR_CLK 0x72676761
+#define QCOM_SMD_RPM_HWKM_CLK 0x6d6b7768
+#define QCOM_SMD_RPM_PKA_CLK 0x616b70
int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
int state,
diff --git a/include/linux/soc/samsung/exynos-chipid.h b/include/linux/soc/samsung/exynos-chipid.h
index 8bca6763f99c..62f0e2531068 100644
--- a/include/linux/soc/samsung/exynos-chipid.h
+++ b/include/linux/soc/samsung/exynos-chipid.h
@@ -9,10 +9,8 @@
#define __LINUX_SOC_EXYNOS_CHIPID_H
#define EXYNOS_CHIPID_REG_PRO_ID 0x00
-#define EXYNOS_SUBREV_MASK (0xf << 4)
-#define EXYNOS_MAINREV_MASK (0xf << 0)
-#define EXYNOS_REV_MASK (EXYNOS_SUBREV_MASK | \
- EXYNOS_MAINREV_MASK)
+#define EXYNOS_REV_PART_MASK 0xf
+#define EXYNOS_REV_PART_SHIFT 4
#define EXYNOS_MASK 0xfffff000
#define EXYNOS_CHIPID_REG_PKG_ID 0x04
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 041d6032a348..8ef26d89ef49 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -364,6 +364,8 @@ struct ucred {
#define SOL_KCM 281
#define SOL_TLS 282
#define SOL_XDP 283
+#define SOL_MPTCP 284
+#define SOL_MCTP 285
/* IPX options */
#define IPX_TYPE 1
diff --git a/include/linux/spi/ads7846.h b/include/linux/spi/ads7846.h
index 1a5eaef3b7f2..d424c1aadf38 100644
--- a/include/linux/spi/ads7846.h
+++ b/include/linux/spi/ads7846.h
@@ -1,17 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* linux/spi/ads7846.h */
-/* Touchscreen characteristics vary between boards and models. The
- * platform_data for the device's "struct device" holds this information.
- *
- * It's OK if the min/max values are zero.
- */
-enum ads7846_filter {
- ADS7846_FILTER_OK,
- ADS7846_FILTER_REPEAT,
- ADS7846_FILTER_IGNORE,
-};
-
struct ads7846_platform_data {
u16 model; /* 7843, 7845, 7846, 7873. */
u16 vref_delay_usecs; /* 0 for external vref; etc */
@@ -51,10 +40,6 @@ struct ads7846_platform_data {
int gpio_pendown_debounce; /* platform specific debounce time for
* the gpio_pendown */
int (*get_pendown_state)(void);
- int (*filter_init) (const struct ads7846_platform_data *pdata,
- void **filter_data);
- int (*filter) (void *filter_data, int data_idx, int *val);
- void (*filter_cleanup)(void *filter_data);
void (*wait_for_sync)(void);
bool wakeup;
unsigned long irq_flags;
diff --git a/include/linux/spi/max7301.h b/include/linux/spi/max7301.h
index 21449067aedb..e392c53758bc 100644
--- a/include/linux/spi/max7301.h
+++ b/include/linux/spi/max7301.h
@@ -31,6 +31,6 @@ struct max7301_platform_data {
u32 input_pullup_active;
};
-extern int __max730x_remove(struct device *dev);
+extern void __max730x_remove(struct device *dev);
extern int __max730x_probe(struct max7301 *ts);
#endif
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 6b0b686f6f90..eb7ac8a1e03c 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -78,10 +78,6 @@ struct spi_statistics {
unsigned long transfers_split_maxsize;
};
-void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
- struct spi_transfer *xfer,
- struct spi_controller *ctlr);
-
#define SPI_STATISTICS_ADD_TO_FIELD(stats, field, count) \
do { \
unsigned long flags; \
@@ -763,8 +759,6 @@ extern int devm_spi_register_controller(struct device *dev,
struct spi_controller *ctlr);
extern void spi_unregister_controller(struct spi_controller *ctlr);
-extern struct spi_controller *spi_busnum_to_master(u16 busnum);
-
/*
* SPI resource management while processing a SPI message
*/
@@ -788,15 +782,6 @@ struct spi_res {
unsigned long long data[]; /* guarantee ull alignment */
};
-extern void *spi_res_alloc(struct spi_device *spi,
- spi_res_release_t release,
- size_t size, gfp_t gfp);
-extern void spi_res_add(struct spi_message *message, void *res);
-extern void spi_res_free(void *res);
-
-extern void spi_res_release(struct spi_controller *ctlr,
- struct spi_message *message);
-
/*---------------------------------------------------------------------------*/
/*
@@ -1114,8 +1099,6 @@ static inline void spi_message_free(struct spi_message *m)
extern int spi_setup(struct spi_device *spi);
extern int spi_async(struct spi_device *spi, struct spi_message *message);
-extern int spi_async_locked(struct spi_device *spi,
- struct spi_message *message);
extern int spi_slave_abort(struct spi_device *spi);
static inline size_t
@@ -1198,15 +1181,6 @@ struct spi_replaced_transfers {
struct spi_transfer inserted_transfers[];
};
-extern struct spi_replaced_transfers *spi_replace_transfers(
- struct spi_message *msg,
- struct spi_transfer *xfer_first,
- size_t remove,
- size_t insert,
- spi_replaced_release_t release,
- size_t extradatasize,
- gfp_t gfp);
-
/*---------------------------------------------------------------------------*/
/* SPI transfer transformation methods */
@@ -1478,20 +1452,8 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
* use spi_new_device() to describe each device. You can also call
* spi_unregister_device() to start making that device vanish, but
* normally that would be handled by spi_unregister_controller().
- *
- * You can also use spi_alloc_device() and spi_add_device() to use a two
- * stage registration sequence for each spi_device. This gives the caller
- * some more control over the spi_device structure before it is registered,
- * but requires that caller to initialize fields that would otherwise
- * be defined using the board info.
*/
extern struct spi_device *
-spi_alloc_device(struct spi_controller *ctlr);
-
-extern int
-spi_add_device(struct spi_device *spi);
-
-extern struct spi_device *
spi_new_device(struct spi_controller *, struct spi_board_info *);
extern void spi_unregister_device(struct spi_device *spi);
@@ -1505,23 +1467,6 @@ spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer)
return list_is_last(&xfer->transfer_list, &ctlr->cur_msg->transfers);
}
-/* OF support code */
-#if IS_ENABLED(CONFIG_OF)
-
-/* must call put_device() when done with returned spi_device device */
-extern struct spi_device *
-of_find_spi_device_by_node(struct device_node *node);
-
-#else
-
-static inline struct spi_device *
-of_find_spi_device_by_node(struct device_node *node)
-{
- return NULL;
-}
-
-#endif /* IS_ENABLED(CONFIG_OF) */
-
/* Compatibility layer */
#define spi_master spi_controller
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 45310ea1b1d7..b4e5ca23f840 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -57,7 +57,6 @@
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
-#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
@@ -177,7 +176,6 @@ do { \
#ifdef CONFIG_DEBUG_SPINLOCK
extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
-#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
extern int do_raw_spin_trylock(raw_spinlock_t *lock);
extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
@@ -188,18 +186,6 @@ static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
mmiowb_spin_lock();
}
-#ifndef arch_spin_lock_flags
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#endif
-
-static inline void
-do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
-{
- __acquire(lock);
- arch_spin_lock_flags(&lock->raw_lock, *flags);
- mmiowb_spin_lock();
-}
-
static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
int ret = arch_spin_trylock(&(lock)->raw_lock);
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 6b8e1a0b137b..51fa0dab68c4 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -108,16 +108,7 @@ static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
local_irq_save(flags);
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- /*
- * On lockdep we dont want the hand-coded irq-enable of
- * do_raw_spin_lock_flags() code, because lockdep assumes
- * that interrupts are not re-enabled during lock-acquire:
- */
-#ifdef CONFIG_LOCKDEP
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
-#else
- do_raw_spin_lock_flags(lock, &flags);
-#endif
return flags;
}
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 0ac9112c1bbe..16521074b6f7 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -62,7 +62,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
#define arch_spin_is_locked(lock) ((void)(lock), 0)
/* for sched/core.c and kernel_lock.c: */
# define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0)
-# define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0)
# define arch_spin_unlock(lock) do { barrier(); (void)(lock); } while (0)
# define arch_spin_trylock(lock) ({ barrier(); (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index 6bb4bc1a5f54..c34b55a6e554 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -11,15 +11,24 @@
#ifndef _LINUX_STACKDEPOT_H
#define _LINUX_STACKDEPOT_H
+#include <linux/gfp.h>
+
typedef u32 depot_stack_handle_t;
+depot_stack_handle_t __stack_depot_save(unsigned long *entries,
+ unsigned int nr_entries,
+ gfp_t gfp_flags, bool can_alloc);
+
depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned int nr_entries, gfp_t gfp_flags);
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries);
-unsigned int filter_irq_stacks(unsigned long *entries, unsigned int nr_entries);
+int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
+ int spaces);
+
+void stack_depot_print(depot_stack_handle_t stack);
#ifdef CONFIG_STACKDEPOT
int stack_depot_init(void);
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 9edecb494e9e..bef158815e83 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -21,6 +21,7 @@ unsigned int stack_trace_save_tsk(struct task_struct *task,
unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
unsigned int size, unsigned int skipnr);
unsigned int stack_trace_save_user(unsigned long *store, unsigned int size);
+unsigned int filter_irq_stacks(unsigned long *entries, unsigned int nr_entries);
/* Internal interfaces. Do not use in generic code */
#ifdef CONFIG_ARCH_STACKWALK
diff --git a/include/linux/stddef.h b/include/linux/stddef.h
index 998a4ba28eba..ca507bd5f808 100644
--- a/include/linux/stddef.h
+++ b/include/linux/stddef.h
@@ -20,7 +20,7 @@ enum {
#endif
/**
- * sizeof_field(TYPE, MEMBER)
+ * sizeof_field() - Report the size of a struct field in bytes
*
* @TYPE: The structure containing the field of interest
* @MEMBER: The field to return the size of
@@ -28,7 +28,7 @@ enum {
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
/**
- * offsetofend(TYPE, MEMBER)
+ * offsetofend() - Report the offset of a struct field within the struct
*
* @TYPE: The type of the structure
* @MEMBER: The member within the structure to get the end offset of
@@ -36,4 +36,65 @@ enum {
#define offsetofend(TYPE, MEMBER) \
(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
+/**
+ * struct_group() - Wrap a set of declarations in a mirrored struct
+ *
+ * @NAME: The identifier name of the mirrored sub-struct
+ * @MEMBERS: The member declarations for the mirrored structs
+ *
+ * Used to create an anonymous union of two structs with identical
+ * layout and size: one anonymous and one named. The former can be
+ * used normally without sub-struct naming, and the latter can be
+ * used to reason about the start, end, and size of the group of
+ * struct members.
+ */
+#define struct_group(NAME, MEMBERS...) \
+ __struct_group(/* no tag */, NAME, /* no attrs */, MEMBERS)
+
+/**
+ * struct_group_attr() - Create a struct_group() with trailing attributes
+ *
+ * @NAME: The identifier name of the mirrored sub-struct
+ * @ATTRS: Any struct attributes to apply
+ * @MEMBERS: The member declarations for the mirrored structs
+ *
+ * Used to create an anonymous union of two structs with identical
+ * layout and size: one anonymous and one named. The former can be
+ * used normally without sub-struct naming, and the latter can be
+ * used to reason about the start, end, and size of the group of
+ * struct members. Includes structure attributes argument.
+ */
+#define struct_group_attr(NAME, ATTRS, MEMBERS...) \
+ __struct_group(/* no tag */, NAME, ATTRS, MEMBERS)
+
+/**
+ * struct_group_tagged() - Create a struct_group with a reusable tag
+ *
+ * @TAG: The tag name for the named sub-struct
+ * @NAME: The identifier name of the mirrored sub-struct
+ * @MEMBERS: The member declarations for the mirrored structs
+ *
+ * Used to create an anonymous union of two structs with identical
+ * layout and size: one anonymous and one named. The former can be
+ * used normally without sub-struct naming, and the latter can be
+ * used to reason about the start, end, and size of the group of
+ * struct members. Includes struct tag argument for the named copy,
+ * so the specified layout can be reused later.
+ */
+#define struct_group_tagged(TAG, NAME, MEMBERS...) \
+ __struct_group(TAG, NAME, /* no attrs */, MEMBERS)
+
+/**
+ * DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union
+ *
+ * @TYPE: The type of each flexible array element
+ * @NAME: The name of the flexible array member
+ *
+ * In order to have a flexible array member in a union or alone in a
+ * struct, it needs to be wrapped in an anonymous struct with at least 1
+ * named member, but that member can be empty.
+ */
+#define DECLARE_FLEX_ARRAY(TYPE, NAME) \
+ __DECLARE_FLEX_ARRAY(TYPE, NAME)
+
#endif
diff --git a/include/linux/string.h b/include/linux/string.h
index 5e96d656be7a..b6572aeca2f5 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -249,36 +249,47 @@ static inline const char *kbasename(const char *path)
return tail ? tail + 1 : path;
}
-#define __FORTIFY_INLINE extern __always_inline __attribute__((gnu_inline))
-#define __RENAME(x) __asm__(#x)
-
-void fortify_panic(const char *name) __noreturn __cold;
-void __read_overflow(void) __compiletime_error("detected read beyond size of object passed as 1st parameter");
-void __read_overflow2(void) __compiletime_error("detected read beyond size of object passed as 2nd parameter");
-void __read_overflow3(void) __compiletime_error("detected read beyond size of object passed as 3rd parameter");
-void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter");
-
#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
#include <linux/fortify-string.h>
#endif
+void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
+ int pad);
+
/**
- * memcpy_and_pad - Copy one buffer to another with padding
- * @dest: Where to copy to
- * @dest_len: The destination buffer size
- * @src: Where to copy from
- * @count: The number of bytes to copy
- * @pad: Character to use for padding if space is left in destination.
+ * memset_after - Set a value after a struct member to the end of a struct
+ *
+ * @obj: Address of target struct instance
+ * @v: Byte value to repeatedly write
+ * @member: the struct member after which to start writing bytes
+ *
+ * This is good for clearing padding following the given member.
*/
-static inline void memcpy_and_pad(void *dest, size_t dest_len,
- const void *src, size_t count, int pad)
-{
- if (dest_len > count) {
- memcpy(dest, src, count);
- memset(dest + count, pad, dest_len - count);
- } else
- memcpy(dest, src, dest_len);
-}
+#define memset_after(obj, v, member) \
+({ \
+ u8 *__ptr = (u8 *)(obj); \
+ typeof(v) __val = (v); \
+ memset(__ptr + offsetofend(typeof(*(obj)), member), __val, \
+ sizeof(*(obj)) - offsetofend(typeof(*(obj)), member)); \
+})
+
+/**
+ * memset_startat - Set a value starting at a member to the end of a struct
+ *
+ * @obj: Address of target struct instance
+ * @v: Byte value to repeatedly write
+ * @member: struct member to start writing at
+ *
+ * Note that if there is padding between the prior member and the target
+ * member, memset_after() should be used to clear the prior padding.
+ */
+#define memset_startat(obj, v, member) \
+({ \
+ u8 *__ptr = (u8 *)(obj); \
+ typeof(v) __val = (v); \
+ memset(__ptr + offsetof(typeof(*(obj)), member), __val, \
+ sizeof(*(obj)) - offsetof(typeof(*(obj)), member)); \
+})
/**
* str_has_prefix - Test if a string has a given prefix
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index 68189c4a2eb1..4ba39e1403b2 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -4,6 +4,7 @@
#include <linux/bits.h>
#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/types.h>
struct file;
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index a4661646adc9..267b7aeaf1a6 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -40,6 +40,7 @@ struct rpc_clnt {
unsigned int cl_clid; /* client id */
struct list_head cl_clients; /* Global list of clients */
struct list_head cl_tasks; /* List of tasks */
+ atomic_t cl_pid; /* task PID counter */
spinlock_t cl_lock; /* spinlock */
struct rpc_xprt __rcu * cl_xprt; /* transport */
const struct rpc_procinfo *cl_procinfo; /* procedure info */
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index a237b8dbf608..db964bb63912 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -150,25 +150,13 @@ struct rpc_task_setup {
#define RPC_TASK_MSG_PIN_WAIT 5
#define RPC_TASK_SIGNALLED 6
-#define RPC_IS_RUNNING(t) test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
-#define rpc_set_running(t) set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define rpc_test_and_set_running(t) \
test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
-#define rpc_clear_running(t) \
- do { \
- smp_mb__before_atomic(); \
- clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \
- smp_mb__after_atomic(); \
- } while (0)
+#define rpc_clear_running(t) clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define RPC_IS_QUEUED(t) test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
#define rpc_set_queued(t) set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
-#define rpc_clear_queued(t) \
- do { \
- smp_mb__before_atomic(); \
- clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate); \
- smp_mb__after_atomic(); \
- } while (0)
+#define rpc_clear_queued(t) clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
#define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 064c96157d1f..0ae28ae6caf2 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -443,10 +443,7 @@ struct svc_version {
/* Need xprt with congestion control */
bool vs_need_cong_ctrl;
- /* Override dispatch function (e.g. when caching replies).
- * A return value of 0 means drop the request.
- * vs_dispatch == NULL means use default dispatcher.
- */
+ /* Dispatch function */
int (*vs_dispatch)(struct svc_rqst *, __be32 *);
};
@@ -457,9 +454,11 @@ struct svc_procedure {
/* process the request: */
__be32 (*pc_func)(struct svc_rqst *);
/* XDR decode args: */
- int (*pc_decode)(struct svc_rqst *, __be32 *data);
+ bool (*pc_decode)(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr);
/* XDR encode result: */
- int (*pc_encode)(struct svc_rqst *, __be32 *data);
+ bool (*pc_encode)(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr);
/* XDR free result: */
void (*pc_release)(struct svc_rqst *);
unsigned int pc_argsize; /* argument struct size */
@@ -532,8 +531,7 @@ int svc_encode_result_payload(struct svc_rqst *rqstp,
unsigned int offset,
unsigned int length);
unsigned int svc_fill_write_vector(struct svc_rqst *rqstp,
- struct page **pages,
- struct kvec *first, size_t total);
+ struct xdr_buf *payload);
char *svc_fill_symlink_pathname(struct svc_rqst *rqstp,
struct kvec *first, void *p,
size_t total);
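
A sketch of a decoder under the new pc_decode signature: arguments are pulled
straight from the xdr_stream and success is reported as a bool. The args
struct is hypothetical; xdr_stream_decode_u32() is the existing helper from
linux/sunrpc/xdr.h.

struct sample_args {
	u32 count;
};

static bool sample_xdr_decode_args(struct svc_rqst *rqstp,
				   struct xdr_stream *xdr)
{
	struct sample_args *argp = rqstp->rq_argp;

	/* The xdr_stream decode helpers return a negative errno on failure. */
	if (xdr_stream_decode_u32(xdr, &argp->count) < 0)
		return false;
	return true;
}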
diff --git a/include/linux/surface_aggregator/controller.h b/include/linux/surface_aggregator/controller.h
index 068e1982ad37..74bfdffaf7b0 100644
--- a/include/linux/surface_aggregator/controller.h
+++ b/include/linux/surface_aggregator/controller.h
@@ -792,8 +792,8 @@ enum ssam_event_mask {
#define SSAM_EVENT_REGISTRY_KIP \
SSAM_EVENT_REGISTRY(SSAM_SSH_TC_KIP, 0x02, 0x27, 0x28)
-#define SSAM_EVENT_REGISTRY_REG \
- SSAM_EVENT_REGISTRY(SSAM_SSH_TC_REG, 0x02, 0x01, 0x02)
+#define SSAM_EVENT_REGISTRY_REG(tid)\
+ SSAM_EVENT_REGISTRY(SSAM_SSH_TC_REG, tid, 0x01, 0x02)
/**
* enum ssam_event_notifier_flags - Flags for event notifiers.
diff --git a/include/linux/swap.h b/include/linux/swap.h
index ba52f3a3478e..d1ea44b31f19 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -320,11 +320,17 @@ struct vma_swap_readahead {
#endif
};
+static inline swp_entry_t folio_swap_entry(struct folio *folio)
+{
+ swp_entry_t entry = { .val = page_private(&folio->page) };
+ return entry;
+}
+
/* linux/mm/workingset.c */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg);
-void workingset_refault(struct page *page, void *shadow);
-void workingset_activation(struct page *page);
+void workingset_refault(struct folio *folio, void *shadow);
+void workingset_activation(struct folio *folio);
/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
@@ -335,7 +341,6 @@ void workingset_update_node(struct xa_node *node);
/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;
-extern unsigned long nr_free_buffer_pages(void);
/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)
@@ -344,9 +349,11 @@ extern unsigned long nr_free_buffer_pages(void);
/* linux/mm/swap.c */
extern void lru_note_cost(struct lruvec *lruvec, bool file,
unsigned int nr_pages);
-extern void lru_note_cost_page(struct page *);
+extern void lru_note_cost_folio(struct folio *);
+extern void folio_add_lru(struct folio *);
extern void lru_cache_add(struct page *);
-extern void mark_page_accessed(struct page *);
+void mark_page_accessed(struct page *);
+void folio_mark_accessed(struct folio *);
extern atomic_t lru_disable_count;
@@ -365,7 +372,6 @@ extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
-extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
extern void deactivate_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
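
folio_swap_entry() just wraps the swap entry stored in page_private(); a small
sketch of typical use, assuming swp_offset() from linux/swapops.h:

static pgoff_t sample_swap_offset(struct folio *folio)
{
	swp_entry_t entry = folio_swap_entry(folio);

	return swp_offset(entry);	/* offset within the swap area */
}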
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index b0cb2a9973f4..569272871375 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -45,7 +45,8 @@ extern void __init swiotlb_update_mem_attributes(void);
phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
size_t mapping_size, size_t alloc_size,
- enum dma_data_direction dir, unsigned long attrs);
+ unsigned int alloc_aligned_mask, enum dma_data_direction dir,
+ unsigned long attrs);
extern void swiotlb_tbl_unmap_single(struct device *hwdev,
phys_addr_t tlb_addr,
diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h
index 082f1d51957a..be24056ac00f 100644
--- a/include/linux/switchtec.h
+++ b/include/linux/switchtec.h
@@ -19,6 +19,7 @@
#define SWITCHTEC_EVENT_EN_CLI BIT(2)
#define SWITCHTEC_EVENT_EN_IRQ BIT(3)
#define SWITCHTEC_EVENT_FATAL BIT(4)
+#define SWITCHTEC_EVENT_NOT_SUPP BIT(31)
#define SWITCHTEC_DMA_MRPC_EN BIT(0)
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 252243c7783d..528a478dbda8 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -58,6 +58,7 @@ struct mq_attr;
struct compat_stat;
struct old_timeval32;
struct robust_list_head;
+struct futex_waitv;
struct getcpu_cache;
struct old_linux_dirent;
struct perf_event_attr;
@@ -610,7 +611,7 @@ asmlinkage long sys_waitid(int which, pid_t pid,
asmlinkage long sys_set_tid_address(int __user *tidptr);
asmlinkage long sys_unshare(unsigned long unshare_flags);
-/* kernel/futex.c */
+/* kernel/futex/syscalls.c */
asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
const struct __kernel_timespec __user *utime,
u32 __user *uaddr2, u32 val3);
@@ -623,6 +624,10 @@ asmlinkage long sys_get_robust_list(int pid,
asmlinkage long sys_set_robust_list(struct robust_list_head __user *head,
size_t len);
+asmlinkage long sys_futex_waitv(struct futex_waitv *waiters,
+ unsigned int nr_futexes, unsigned int flags,
+ struct __kernel_timespec __user *timeout, clockid_t clockid);
+
/* kernel/hrtimer.c */
asmlinkage long sys_nanosleep(struct __kernel_timespec __user *rqtp,
struct __kernel_timespec __user *rmtp);
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
index 96305a64a5a7..c635c2e014e3 100644
--- a/include/linux/t10-pi.h
+++ b/include/linux/t10-pi.h
@@ -3,7 +3,7 @@
#define _LINUX_T10_PI_H
#include <linux/types.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
/*
* A T10 PI-capable target device can be formatted with different
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index 3ebfea0781f1..a1f03461369b 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -197,7 +197,11 @@ int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
* @num_pages: number of locked pages
 * @dmabuf: dmabuf used for exporting to user space
* @flags: defined by TEE_SHM_* in tee_drv.h
- * @id: unique id of a shared memory object on this device
+ * @id: unique id of a shared memory object on this device, shared
+ * with user space
+ * @sec_world_id:
+ * secure world assigned id of this shared memory object, not
+ * used by all drivers
*
* This pool is only supposed to be accessed directly from the TEE
 * subsystem and from drivers that implement their own shm pool manager.
@@ -213,6 +217,7 @@ struct tee_shm {
struct dma_buf *dmabuf;
u32 flags;
int id;
+ u64 sec_world_id;
};
/**
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 0999f6317978..ad0c4e041030 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -203,7 +203,7 @@ static inline void copy_overflow(int size, unsigned long count)
static __always_inline __must_check bool
check_copy_size(const void *addr, size_t bytes, bool is_source)
{
- int sz = __compiletime_object_size(addr);
+ int sz = __builtin_object_size(addr, 0);
if (unlikely(sz >= 0 && sz < bytes)) {
if (!__builtin_constant_p(bytes))
copy_overflow(sz, bytes);
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 7634cd737061..0b3704ad13c8 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -186,6 +186,9 @@ static inline int cpu_to_mem(int cpu)
#ifndef topology_die_id
#define topology_die_id(cpu) ((void)(cpu), -1)
#endif
+#ifndef topology_cluster_id
+#define topology_cluster_id(cpu) ((void)(cpu), -1)
+#endif
#ifndef topology_core_id
#define topology_core_id(cpu) ((void)(cpu), 0)
#endif
@@ -195,6 +198,9 @@ static inline int cpu_to_mem(int cpu)
#ifndef topology_core_cpumask
#define topology_core_cpumask(cpu) cpumask_of(cpu)
#endif
+#ifndef topology_cluster_cpumask
+#define topology_cluster_cpumask(cpu) cpumask_of(cpu)
+#endif
#ifndef topology_die_cpumask
#define topology_die_cpumask(cpu) cpumask_of(cpu)
#endif
@@ -206,6 +212,13 @@ static inline const struct cpumask *cpu_smt_mask(int cpu)
}
#endif
+#if defined(CONFIG_SCHED_CLUSTER) && !defined(cpu_cluster_mask)
+static inline const struct cpumask *cpu_cluster_mask(int cpu)
+{
+ return topology_cluster_cpumask(cpu);
+}
+#endif
+
static inline const struct cpumask *cpu_cpu_mask(int cpu)
{
return cpumask_of_node(cpu_to_node(cpu));
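
A sketch of how the new cluster hooks compose; on architectures without
cluster support the default above degenerates to a single-CPU mask, so the
result is 1:

static unsigned int sample_cluster_weight(int cpu)
{
	/* Number of CPUs sharing a cluster with @cpu, itself included. */
	return cpumask_weight(topology_cluster_cpumask(cpu));
}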
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 0910c5803f35..24f58e50a94b 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -47,6 +47,14 @@ do { \
} while (0)
void verbose_torout_sleep(void);
+#define torture_init_error(firsterr) \
+({ \
+ int ___firsterr = (firsterr); \
+ \
+ WARN_ONCE(!IS_MODULE(CONFIG_RCU_TORTURE_TEST) && ___firsterr < 0, "Torture-test initialization failed with error code %d\n", ___firsterr); \
+ ___firsterr < 0; \
+})
+
/* Definitions for online/offline exerciser. */
#ifdef CONFIG_HOTPLUG_CPU
int torture_num_online_cpus(void);
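
The intended init-path pattern for the new macro, as a sketch (the
sub-initializer and its holdoff/interval values are illustrative; real torture
tests chain several such calls before the first failure check):

static int __init sample_torture_init(void)
{
	int firsterr = 0;

	firsterr = torture_onoff_init(30 * HZ, 3, NULL);
	if (torture_init_error(firsterr))
		goto unwind;
	return 0;

unwind:
	return firsterr;
}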
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index aa11fe323c56..12d827734686 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -269,6 +269,7 @@ enum tpm2_cc_attrs {
#define TPM_VID_INTEL 0x8086
#define TPM_VID_WINBOND 0x1050
#define TPM_VID_STM 0x104A
+#define TPM_VID_ATML 0x1114
enum tpm_chip_flags {
TPM_CHIP_FLAG_TPM2 = BIT(1),
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 3e475eeb5a99..50453b287615 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -671,7 +671,7 @@ struct trace_event_file {
} \
early_initcall(trace_init_perf_perm_##name);
-#define PERF_MAX_TRACE_SIZE 2048
+#define PERF_MAX_TRACE_SIZE 8192
#define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */
diff --git a/include/linux/trace_recursion.h b/include/linux/trace_recursion.h
index a9f9c5714e65..c303f7a114e9 100644
--- a/include/linux/trace_recursion.h
+++ b/include/linux/trace_recursion.h
@@ -16,23 +16,8 @@
* When function tracing occurs, the following steps are made:
* If arch does not support a ftrace feature:
* call internal function (uses INTERNAL bits) which calls...
- * If callback is registered to the "global" list, the list
- * function is called and recursion checks the GLOBAL bits.
- * then this function calls...
* The function callback, which can use the FTRACE bits to
* check for recursion.
- *
- * Now if the arch does not support a feature, and it calls
- * the global list function which calls the ftrace callback
- * all three of these steps will do a recursion protection.
- * There's no reason to do one if the previous caller already
- * did. The recursion that we are protecting against will
- * go through the same steps again.
- *
- * To prevent the multiple recursion checks, if a recursion
- * bit is set that is higher than the MAX bit of the current
- * check, then we know that the check was made by the previous
- * caller, and we can skip the current check.
*/
enum {
/* Function recursion bits */
@@ -40,12 +25,14 @@ enum {
TRACE_FTRACE_NMI_BIT,
TRACE_FTRACE_IRQ_BIT,
TRACE_FTRACE_SIRQ_BIT,
+ TRACE_FTRACE_TRANSITION_BIT,
- /* INTERNAL_BITs must be greater than FTRACE_BITs */
+ /* Internal use recursion bits */
TRACE_INTERNAL_BIT,
TRACE_INTERNAL_NMI_BIT,
TRACE_INTERNAL_IRQ_BIT,
TRACE_INTERNAL_SIRQ_BIT,
+ TRACE_INTERNAL_TRANSITION_BIT,
TRACE_BRANCH_BIT,
/*
@@ -86,12 +73,6 @@ enum {
*/
TRACE_GRAPH_NOTRACE_BIT,
- /*
- * When transitioning between context, the preempt_count() may
- * not be correct. Allow for a single recursion to cover this case.
- */
- TRACE_TRANSITION_BIT,
-
/* Used to prevent recursion recording from recursing. */
TRACE_RECORD_RECURSION_BIT,
};
@@ -113,12 +94,10 @@ enum {
#define TRACE_CONTEXT_BITS 4
#define TRACE_FTRACE_START TRACE_FTRACE_BIT
-#define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
#define TRACE_LIST_START TRACE_INTERNAL_BIT
-#define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
-#define TRACE_CONTEXT_MASK TRACE_LIST_MAX
+#define TRACE_CONTEXT_MASK ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
/*
* Used for setting context
@@ -132,17 +111,14 @@ enum {
TRACE_CTX_IRQ,
TRACE_CTX_SOFTIRQ,
TRACE_CTX_NORMAL,
+ TRACE_CTX_TRANSITION,
};
static __always_inline int trace_get_context_bit(void)
{
- unsigned long pc = preempt_count();
+ unsigned char bit = interrupt_context_level();
- if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
- return TRACE_CTX_NORMAL;
- else
- return pc & NMI_MASK ? TRACE_CTX_NMI :
- pc & HARDIRQ_MASK ? TRACE_CTX_IRQ : TRACE_CTX_SOFTIRQ;
+ return TRACE_CTX_NORMAL - bit;
}
#ifdef CONFIG_FTRACE_RECORD_RECURSION
@@ -159,46 +135,48 @@ extern void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip);
# define do_ftrace_record_recursion(ip, pip) do { } while (0)
#endif
+/*
+ * Preemption is promised to be disabled when return bit >= 0.
+ */
static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsigned long pip,
- int start, int max)
+ int start)
{
unsigned int val = READ_ONCE(current->trace_recursion);
int bit;
- /* A previous recursion check was made */
- if ((val & TRACE_CONTEXT_MASK) > max)
- return 0;
-
bit = trace_get_context_bit() + start;
if (unlikely(val & (1 << bit))) {
/*
- * It could be that preempt_count has not been updated during
- * a switch between contexts. Allow for a single recursion.
+ * If an interrupt occurs during a trace, and another trace
+ * happens in that interrupt but before the preempt_count is
+ * updated to reflect the new interrupt context, then this
+ * will think a recursion occurred, and the event will be dropped.
+ * Let a single instance happen via the TRANSITION_BIT to
+ * not drop those events.
*/
- bit = TRACE_TRANSITION_BIT;
+ bit = TRACE_CTX_TRANSITION + start;
if (val & (1 << bit)) {
do_ftrace_record_recursion(ip, pip);
return -1;
}
- } else {
- /* Normal check passed, clear the transition to allow it again */
- val &= ~(1 << TRACE_TRANSITION_BIT);
}
val |= 1 << bit;
current->trace_recursion = val;
barrier();
- return bit + 1;
+ preempt_disable_notrace();
+
+ return bit;
}
+/*
+ * Preemption will be enabled (if it was previously enabled).
+ */
static __always_inline void trace_clear_recursion(int bit)
{
- if (!bit)
- return;
-
+ preempt_enable_notrace();
barrier();
- bit--;
trace_recursion_clear(bit);
}
@@ -209,12 +187,12 @@ static __always_inline void trace_clear_recursion(int bit)
* tracing recursed in the same context (normal vs interrupt),
*
* Returns: -1 if a recursion happened.
- * >= 0 if no recursion
+ * >= 0 if no recursion.
*/
static __always_inline int ftrace_test_recursion_trylock(unsigned long ip,
unsigned long parent_ip)
{
- return trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_START, TRACE_FTRACE_MAX);
+ return trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_START);
}
/**
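
The canonical callback guard under the reworked API, as a sketch; with this
change the trylock also disables preemption for as long as the bit is held:

static void sample_ftrace_callback(unsigned long ip, unsigned long parent_ip,
				   struct ftrace_ops *op,
				   struct ftrace_regs *fregs)
{
	int bit;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;		/* genuine recursion; drop this event */

	/* ... tracing work runs with preemption disabled ... */

	trace_clear_recursion(bit);
}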
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 168e57e40bbb..5dbd7c5afac7 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -252,20 +252,20 @@ static inline bool tty_throttled(struct tty_struct *tty)
}
#ifdef CONFIG_TTY
-extern void tty_kref_put(struct tty_struct *tty);
-extern struct pid *tty_get_pgrp(struct tty_struct *tty);
-extern void tty_vhangup_self(void);
-extern void disassociate_ctty(int priv);
-extern dev_t tty_devnum(struct tty_struct *tty);
-extern void proc_clear_tty(struct task_struct *p);
-extern struct tty_struct *get_current_tty(void);
+void tty_kref_put(struct tty_struct *tty);
+struct pid *tty_get_pgrp(struct tty_struct *tty);
+void tty_vhangup_self(void);
+void disassociate_ctty(int priv);
+dev_t tty_devnum(struct tty_struct *tty);
+void proc_clear_tty(struct task_struct *p);
+struct tty_struct *get_current_tty(void);
/* tty_io.c */
-extern int __init tty_init(void);
-extern const char *tty_name(const struct tty_struct *tty);
-extern struct tty_struct *tty_kopen_exclusive(dev_t device);
-extern struct tty_struct *tty_kopen_shared(dev_t device);
-extern void tty_kclose(struct tty_struct *tty);
-extern int tty_dev_name_to_number(const char *name, dev_t *number);
+int __init tty_init(void);
+const char *tty_name(const struct tty_struct *tty);
+struct tty_struct *tty_kopen_exclusive(dev_t device);
+struct tty_struct *tty_kopen_shared(dev_t device);
+void tty_kclose(struct tty_struct *tty);
+int tty_dev_name_to_number(const char *name, dev_t *number);
#else
static inline void tty_kref_put(struct tty_struct *tty)
{ }
@@ -296,7 +296,7 @@ static inline int tty_dev_name_to_number(const char *name, dev_t *number)
extern struct ktermios tty_std_termios;
-extern int vcs_init(void);
+int vcs_init(void);
extern struct class *tty_class;
@@ -316,34 +316,34 @@ static inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
return tty;
}
-extern const char *tty_driver_name(const struct tty_struct *tty);
-extern void tty_wait_until_sent(struct tty_struct *tty, long timeout);
-extern void stop_tty(struct tty_struct *tty);
-extern void start_tty(struct tty_struct *tty);
-extern void tty_write_message(struct tty_struct *tty, char *msg);
-extern int tty_send_xchar(struct tty_struct *tty, char ch);
-extern int tty_put_char(struct tty_struct *tty, unsigned char c);
-extern unsigned int tty_chars_in_buffer(struct tty_struct *tty);
-extern unsigned int tty_write_room(struct tty_struct *tty);
-extern void tty_driver_flush_buffer(struct tty_struct *tty);
-extern void tty_unthrottle(struct tty_struct *tty);
-extern int tty_throttle_safe(struct tty_struct *tty);
-extern int tty_unthrottle_safe(struct tty_struct *tty);
-extern int tty_do_resize(struct tty_struct *tty, struct winsize *ws);
-extern int tty_get_icount(struct tty_struct *tty,
- struct serial_icounter_struct *icount);
-extern int is_current_pgrp_orphaned(void);
-extern void tty_hangup(struct tty_struct *tty);
-extern void tty_vhangup(struct tty_struct *tty);
-extern int tty_hung_up_p(struct file *filp);
-extern void do_SAK(struct tty_struct *tty);
-extern void __do_SAK(struct tty_struct *tty);
-extern void no_tty(void);
-extern speed_t tty_termios_baud_rate(struct ktermios *termios);
-extern void tty_termios_encode_baud_rate(struct ktermios *termios,
- speed_t ibaud, speed_t obaud);
-extern void tty_encode_baud_rate(struct tty_struct *tty,
- speed_t ibaud, speed_t obaud);
+const char *tty_driver_name(const struct tty_struct *tty);
+void tty_wait_until_sent(struct tty_struct *tty, long timeout);
+void stop_tty(struct tty_struct *tty);
+void start_tty(struct tty_struct *tty);
+void tty_write_message(struct tty_struct *tty, char *msg);
+int tty_send_xchar(struct tty_struct *tty, char ch);
+int tty_put_char(struct tty_struct *tty, unsigned char c);
+unsigned int tty_chars_in_buffer(struct tty_struct *tty);
+unsigned int tty_write_room(struct tty_struct *tty);
+void tty_driver_flush_buffer(struct tty_struct *tty);
+void tty_unthrottle(struct tty_struct *tty);
+int tty_throttle_safe(struct tty_struct *tty);
+int tty_unthrottle_safe(struct tty_struct *tty);
+int tty_do_resize(struct tty_struct *tty, struct winsize *ws);
+int tty_get_icount(struct tty_struct *tty,
+ struct serial_icounter_struct *icount);
+int is_current_pgrp_orphaned(void);
+void tty_hangup(struct tty_struct *tty);
+void tty_vhangup(struct tty_struct *tty);
+int tty_hung_up_p(struct file *filp);
+void do_SAK(struct tty_struct *tty);
+void __do_SAK(struct tty_struct *tty);
+void no_tty(void);
+speed_t tty_termios_baud_rate(struct ktermios *termios);
+void tty_termios_encode_baud_rate(struct ktermios *termios, speed_t ibaud,
+ speed_t obaud);
+void tty_encode_baud_rate(struct tty_struct *tty, speed_t ibaud,
+ speed_t obaud);
/**
* tty_get_baud_rate - get tty bit rates
@@ -363,37 +363,36 @@ static inline speed_t tty_get_baud_rate(struct tty_struct *tty)
unsigned char tty_get_char_size(unsigned int cflag);
unsigned char tty_get_frame_size(unsigned int cflag);
-extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old);
-extern int tty_termios_hw_change(const struct ktermios *a, const struct ktermios *b);
-extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt);
+void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old);
+int tty_termios_hw_change(const struct ktermios *a, const struct ktermios *b);
+int tty_set_termios(struct tty_struct *tty, struct ktermios *kt);
-extern void tty_wakeup(struct tty_struct *tty);
+void tty_wakeup(struct tty_struct *tty);
-extern int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg);
-extern int tty_perform_flush(struct tty_struct *tty, unsigned long arg);
-extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx);
-extern void tty_release_struct(struct tty_struct *tty, int idx);
-extern void tty_init_termios(struct tty_struct *tty);
-extern void tty_save_termios(struct tty_struct *tty);
-extern int tty_standard_install(struct tty_driver *driver,
+int tty_mode_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg);
+int tty_perform_flush(struct tty_struct *tty, unsigned long arg);
+struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx);
+void tty_release_struct(struct tty_struct *tty, int idx);
+void tty_init_termios(struct tty_struct *tty);
+void tty_save_termios(struct tty_struct *tty);
+int tty_standard_install(struct tty_driver *driver,
struct tty_struct *tty);
extern struct mutex tty_mutex;
/* n_tty.c */
-extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops);
+void n_tty_inherit_ops(struct tty_ldisc_ops *ops);
#ifdef CONFIG_TTY
-extern void __init n_tty_init(void);
+void __init n_tty_init(void);
#else
static inline void n_tty_init(void) { }
#endif
/* tty_audit.c */
#ifdef CONFIG_AUDIT
-extern void tty_audit_exit(void);
-extern void tty_audit_fork(struct signal_struct *sig);
-extern int tty_audit_push(void);
+void tty_audit_exit(void);
+void tty_audit_fork(struct signal_struct *sig);
+int tty_audit_push(void);
#else
static inline void tty_audit_exit(void)
{
@@ -408,24 +407,23 @@ static inline int tty_audit_push(void)
#endif
/* tty_ioctl.c */
-extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg);
+int n_tty_ioctl_helper(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg);
/* vt.c */
-extern int vt_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg);
+int vt_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg);
-extern long vt_compat_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg);
+long vt_compat_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg);
/* tty_mutex.c */
/* functions for preparation of BKL removal */
-extern void tty_lock(struct tty_struct *tty);
-extern int tty_lock_interruptible(struct tty_struct *tty);
-extern void tty_unlock(struct tty_struct *tty);
-extern void tty_lock_slave(struct tty_struct *tty);
-extern void tty_unlock_slave(struct tty_struct *tty);
-extern void tty_set_lock_subclass(struct tty_struct *tty);
+void tty_lock(struct tty_struct *tty);
+int tty_lock_interruptible(struct tty_struct *tty);
+void tty_unlock(struct tty_struct *tty);
+void tty_lock_slave(struct tty_struct *tty);
+void tty_unlock_slave(struct tty_struct *tty);
+void tty_set_lock_subclass(struct tty_struct *tty);
#endif
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index c20431d8def8..795b94ccdeb6 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -327,11 +327,11 @@ struct tty_driver {
extern struct list_head tty_drivers;
-extern struct tty_driver *__tty_alloc_driver(unsigned int lines,
- struct module *owner, unsigned long flags);
-extern struct tty_driver *tty_find_polling_driver(char *name, int *line);
+struct tty_driver *__tty_alloc_driver(unsigned int lines, struct module *owner,
+ unsigned long flags);
+struct tty_driver *tty_find_polling_driver(char *name, int *line);
-extern void tty_driver_kref_put(struct tty_driver *driver);
+void tty_driver_kref_put(struct tty_driver *driver);
/* Use TTY_DRIVER_* flags below */
#define tty_alloc_driver(lines, flags) \
@@ -360,7 +360,7 @@ static inline void tty_set_operations(struct tty_driver *driver,
* Used for PTY's, in particular.
*
* TTY_DRIVER_REAL_RAW --- if set, indicates that the driver will
- * guarantee never not to set any special character handling
+ * guarantee never to set any special character handling
* flags if ((IGNBRK || (!BRKINT && !PARMRK)) && (IGNPAR ||
* !INPCK)). That is, if there is no reason for the driver to
* send notifications of parity and break characters up to the
diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
index 32284992b31a..9916acb5de49 100644
--- a/include/linux/tty_flip.h
+++ b/include/linux/tty_flip.h
@@ -7,16 +7,16 @@
struct tty_ldisc;
-extern int tty_buffer_set_limit(struct tty_port *port, int limit);
-extern unsigned int tty_buffer_space_avail(struct tty_port *port);
-extern int tty_buffer_request_room(struct tty_port *port, size_t size);
-extern int tty_insert_flip_string_flags(struct tty_port *port,
+int tty_buffer_set_limit(struct tty_port *port, int limit);
+unsigned int tty_buffer_space_avail(struct tty_port *port);
+int tty_buffer_request_room(struct tty_port *port, size_t size);
+int tty_insert_flip_string_flags(struct tty_port *port,
const unsigned char *chars, const char *flags, size_t size);
-extern int tty_insert_flip_string_fixed_flag(struct tty_port *port,
+int tty_insert_flip_string_fixed_flag(struct tty_port *port,
const unsigned char *chars, char flag, size_t size);
-extern int tty_prepare_flip_string(struct tty_port *port,
- unsigned char **chars, size_t size);
-extern void tty_flip_buffer_push(struct tty_port *port);
+int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars,
+ size_t size);
+void tty_flip_buffer_push(struct tty_port *port);
void tty_schedule_flip(struct tty_port *port);
int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag);
@@ -45,7 +45,7 @@ static inline int tty_insert_flip_string(struct tty_port *port,
int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
const char *f, int count);
-extern void tty_buffer_lock_exclusive(struct tty_port *port);
-extern void tty_buffer_unlock_exclusive(struct tty_port *port);
+void tty_buffer_lock_exclusive(struct tty_port *port);
+void tty_buffer_unlock_exclusive(struct tty_port *port);
#endif /* _LINUX_TTY_FLIP_H */
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index b1d812e902aa..b85d84fb5f49 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -146,7 +146,7 @@ struct ld_semaphore {
#endif
};
-extern void __init_ldsem(struct ld_semaphore *sem, const char *name,
+void __init_ldsem(struct ld_semaphore *sem, const char *name,
struct lock_class_key *key);
#define init_ldsem(sem) \
@@ -157,18 +157,18 @@ do { \
} while (0)
-extern int ldsem_down_read(struct ld_semaphore *sem, long timeout);
-extern int ldsem_down_read_trylock(struct ld_semaphore *sem);
-extern int ldsem_down_write(struct ld_semaphore *sem, long timeout);
-extern int ldsem_down_write_trylock(struct ld_semaphore *sem);
-extern void ldsem_up_read(struct ld_semaphore *sem);
-extern void ldsem_up_write(struct ld_semaphore *sem);
+int ldsem_down_read(struct ld_semaphore *sem, long timeout);
+int ldsem_down_read_trylock(struct ld_semaphore *sem);
+int ldsem_down_write(struct ld_semaphore *sem, long timeout);
+int ldsem_down_write_trylock(struct ld_semaphore *sem);
+void ldsem_up_read(struct ld_semaphore *sem);
+void ldsem_up_write(struct ld_semaphore *sem);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass,
- long timeout);
-extern int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
- long timeout);
+int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass,
+ long timeout);
+int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
+ long timeout);
#else
# define ldsem_down_read_nested(sem, subclass, timeout) \
ldsem_down_read(sem, timeout)
@@ -180,7 +180,6 @@ extern int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
struct tty_ldisc_ops {
char *name;
int num;
- int flags;
/*
* The following routines are called from above.
@@ -200,7 +199,7 @@ struct tty_ldisc_ops {
void (*set_termios)(struct tty_struct *tty, struct ktermios *old);
__poll_t (*poll)(struct tty_struct *, struct file *,
struct poll_table_struct *);
- int (*hangup)(struct tty_struct *tty);
+ void (*hangup)(struct tty_struct *tty);
/*
* The following routines are called from below.
@@ -220,8 +219,6 @@ struct tty_ldisc {
struct tty_struct *tty;
};
-#define LDISC_FLAG_DEFINED 0x00000001
-
#define MODULE_ALIAS_LDISC(ldisc) \
MODULE_ALIAS("tty-ldisc-" __stringify(ldisc))
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index e81856c0ba13..e8ec116c916b 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -83,6 +83,11 @@ static inline u64 u64_stats_read(const u64_stats_t *p)
return local64_read(&p->v);
}
+static inline void u64_stats_set(u64_stats_t *p, u64 val)
+{
+ local64_set(&p->v, val);
+}
+
static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
local64_add(val, &p->v);
@@ -104,6 +109,11 @@ static inline u64 u64_stats_read(const u64_stats_t *p)
return p->v;
}
+static inline void u64_stats_set(u64_stats_t *p, u64 val)
+{
+ p->v = val;
+}
+
static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
p->v += val;
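
A sketch of where the new u64_stats_set() fits: initializing or resetting a
counter under the usual writer-side protection (the stats struct is
illustrative):

struct sample_stats {
	u64_stats_t		packets;
	struct u64_stats_sync	syncp;
};

static void sample_stats_reset(struct sample_stats *s)
{
	u64_stats_update_begin(&s->syncp);
	u64_stats_set(&s->packets, 0);
	u64_stats_update_end(&s->syncp);
}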
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 207101a9c5c3..6350354f97e9 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -35,6 +35,7 @@ struct iov_iter_state {
struct iov_iter {
u8 iter_type;
+ bool nofault;
bool data_source;
size_t iov_offset;
size_t count;
@@ -133,7 +134,8 @@ size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
-int iov_iter_fault_in_readable(const struct iov_iter *i, size_t bytes);
+size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
+size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i);
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 2c1fc9212cf2..548a028f2dab 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -124,7 +124,6 @@ struct usb_hcd {
#define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */
#define HCD_FLAG_DEAD 6 /* controller has died? */
#define HCD_FLAG_INTF_AUTHORIZED 7 /* authorize interfaces? */
-#define HCD_FLAG_DEFER_RH_REGISTER 8 /* Defer roothub registration */
/* The flags can be tested using these macros; they are likely to
* be slightly faster than test_bit().
@@ -135,7 +134,6 @@ struct usb_hcd {
#define HCD_WAKEUP_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING))
#define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING))
#define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD))
-#define HCD_DEFER_RH_REGISTER(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEFER_RH_REGISTER))
/*
* Specifies if interfaces are authorized by default
diff --git a/include/linux/usb/tegra_usb_phy.h b/include/linux/usb/tegra_usb_phy.h
index fd1c9f6a4e37..d3e65eb9e16f 100644
--- a/include/linux/usb/tegra_usb_phy.h
+++ b/include/linux/usb/tegra_usb_phy.h
@@ -18,6 +18,7 @@
#include <linux/clk.h>
#include <linux/gpio.h>
+#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/usb/otg.h>
@@ -30,6 +31,7 @@
* enter host mode
* requires_extra_tuning_parameters: true if xcvr_hsslew, hssquelch_level
* and hsdiscon_level should be set for adequate signal quality
+ * requires_pmc_ao_power_up: true if USB AO is powered down by default
*/
struct tegra_phy_soc_config {
@@ -37,6 +39,7 @@ struct tegra_phy_soc_config {
bool has_hostpc;
bool requires_usbmode_setup;
bool requires_extra_tuning_parameters;
+ bool requires_pmc_ao_power_up;
};
struct tegra_utmip_config {
@@ -62,6 +65,7 @@ enum tegra_usb_phy_port_speed {
struct tegra_xtal_freq;
struct tegra_usb_phy {
+ int irq;
int instance;
const struct tegra_xtal_freq *freq;
void __iomem *regs;
@@ -70,6 +74,7 @@ struct tegra_usb_phy {
struct clk *pll_u;
struct clk *pad_clk;
struct regulator *vbus;
+ struct regmap *pmc_regmap;
enum usb_dr_mode mode;
void *config;
const struct tegra_phy_soc_config *soc_config;
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index eb70cabe6e7f..33a4240e6a6f 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -127,6 +127,8 @@ static inline long get_ucounts_value(struct ucounts *ucounts, enum ucount_type t
long inc_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v);
bool dec_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v);
+long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum ucount_type type);
+void dec_rlimit_put_ucounts(struct ucounts *ucounts, enum ucount_type type);
bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long max);
static inline void set_rlimit_ucount_max(struct user_namespace *ns,
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index 3972ab765de1..c3011ccda430 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -6,6 +6,8 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/vhost_iotlb.h>
+#include <linux/virtio_net.h>
+#include <linux/if_ether.h>
/**
 * struct vdpa_callback - vDPA callback definition.
@@ -63,6 +65,7 @@ struct vdpa_mgmt_dev;
* @dev: underlying device
* @dma_dev: the actual device that is performing DMA
* @config: the configuration ops for this device.
+ * @cf_mutex: Protects get and set access to configuration layout.
* @index: device index
* @features_valid: were features initialized? for legacy guests
* @use_va: indicate whether virtual address must be used by this device
@@ -74,6 +77,7 @@ struct vdpa_device {
struct device dev;
struct device *dma_dev;
const struct vdpa_config_ops *config;
+ struct mutex cf_mutex; /* Protects get/set config */
unsigned int index;
bool features_valid;
bool use_va;
@@ -91,6 +95,14 @@ struct vdpa_iova_range {
u64 last;
};
+struct vdpa_dev_set_config {
+ struct {
+ u8 mac[ETH_ALEN];
+ u16 mtu;
+ } net;
+ u64 mask;
+};
+
/**
* Corresponding file area for device memory mapping
* @file: vma->vm_file for the mapping
@@ -171,6 +183,9 @@ struct vdpa_map_file {
* @get_vq_num_max: Get the max size of virtqueue
* @vdev: vdpa device
* Returns u16: max size of virtqueue
+ * @get_vq_num_min: Get the min size of virtqueue (optional)
+ * @vdev: vdpa device
+ * Returns u16: min size of virtqueue
* @get_device_id: Get virtio device id
* @vdev: vdpa device
* Returns u32: virtio device id
@@ -257,7 +272,7 @@ struct vdpa_config_ops {
struct vdpa_notification_area
(*get_vq_notification)(struct vdpa_device *vdev, u16 idx);
/* vq irq is not expected to be changed once DRIVER_OK is set */
- int (*get_vq_irq)(struct vdpa_device *vdv, u16 idx);
+ int (*get_vq_irq)(struct vdpa_device *vdev, u16 idx);
/* Device ops */
u32 (*get_vq_align)(struct vdpa_device *vdev);
@@ -266,6 +281,7 @@ struct vdpa_config_ops {
void (*set_config_cb)(struct vdpa_device *vdev,
struct vdpa_callback *cb);
u16 (*get_vq_num_max)(struct vdpa_device *vdev);
+ u16 (*get_vq_num_min)(struct vdpa_device *vdev);
u32 (*get_device_id)(struct vdpa_device *vdev);
u32 (*get_vendor_id)(struct vdpa_device *vdev);
u8 (*get_status)(struct vdpa_device *vdev);
@@ -382,26 +398,16 @@ static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features)
return ops->set_features(vdev, features);
}
-static inline void vdpa_get_config(struct vdpa_device *vdev,
- unsigned int offset, void *buf,
- unsigned int len)
-{
- const struct vdpa_config_ops *ops = vdev->config;
-
- /*
- * Config accesses aren't supposed to trigger before features are set.
- * If it does happen we assume a legacy guest.
- */
- if (!vdev->features_valid)
- vdpa_set_features(vdev, 0);
- ops->get_config(vdev, offset, buf, len);
-}
-
+void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
+ void *buf, unsigned int len);
+void vdpa_set_config(struct vdpa_device *dev, unsigned int offset,
+ const void *buf, unsigned int length);
/**
* struct vdpa_mgmtdev_ops - vdpa device ops
* @dev_add: Add a vdpa device using alloc and register
* @mdev: parent device to use for device addition
* @name: name of the new vdpa device
+ * @config: config attributes to apply to the device under creation
 * Driver needs to add a new device using _vdpa_register_device()
 * after fully initializing the vdpa device. Driver must return 0
 * on success or an appropriate error code.
@@ -412,14 +418,25 @@ static inline void vdpa_get_config(struct vdpa_device *vdev,
* _vdpa_unregister_device().
*/
struct vdpa_mgmtdev_ops {
- int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name);
+ int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name,
+ const struct vdpa_dev_set_config *config);
void (*dev_del)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev);
};
+/**
+ * struct vdpa_mgmt_dev - vdpa management device
+ * @device: Management parent device
+ * @ops: operations supported by management device
+ * @id_table: Pointer to device id table of supported ids
+ * @config_attr_mask: bit mask of attributes of type enum vdpa_attr that
+ * management device supports during the dev_add callback
+ * @list: list entry
+ */
struct vdpa_mgmt_dev {
struct device *device;
const struct vdpa_mgmtdev_ops *ops;
- const struct virtio_device_id *id_table; /* supported ids */
+ const struct virtio_device_id *id_table;
+ u64 config_attr_mask;
struct list_head list;
};
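
A hedged sketch of a dev_add callback under the new signature, applying a
user-supplied MAC when the attribute mask announces one. The driver state,
sample_vdpa_alloc() helper, and SAMPLE_VQ_COUNT are hypothetical;
VDPA_ATTR_DEV_NET_CFG_MACADDR is the corresponding uapi attribute.

static int sample_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
			       const struct vdpa_dev_set_config *config)
{
	struct sample_vdpa *vf;

	vf = sample_vdpa_alloc(mdev, name);	/* hypothetical helper */
	if (IS_ERR(vf))
		return PTR_ERR(vf);

	if (config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR))
		ether_addr_copy(vf->mac, config->net.mac);

	return _vdpa_register_device(&vf->vdpa, SAMPLE_VQ_COUNT);
}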
diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
index 1eaaa93c37bf..329d63babaeb 100644
--- a/include/linux/vermagic.h
+++ b/include/linux/vermagic.h
@@ -15,7 +15,7 @@
#else
#define MODULE_VERMAGIC_SMP ""
#endif
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_BUILD
#define MODULE_VERMAGIC_PREEMPT "preempt "
#elif defined(CONFIG_PREEMPT_RT)
#define MODULE_VERMAGIC_PREEMPT "preempt_rt "
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index b53a9557884a..76191d7abed1 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -71,68 +71,17 @@ struct vfio_device_ops {
int (*match)(struct vfio_device *vdev, char *buf);
};
-extern struct iommu_group *vfio_iommu_group_get(struct device *dev);
-extern void vfio_iommu_group_put(struct iommu_group *group, struct device *dev);
-
void vfio_init_group_dev(struct vfio_device *device, struct device *dev,
const struct vfio_device_ops *ops);
void vfio_uninit_group_dev(struct vfio_device *device);
int vfio_register_group_dev(struct vfio_device *device);
+int vfio_register_emulated_iommu_dev(struct vfio_device *device);
void vfio_unregister_group_dev(struct vfio_device *device);
extern struct vfio_device *vfio_device_get_from_dev(struct device *dev);
extern void vfio_device_put(struct vfio_device *device);
int vfio_assign_device_set(struct vfio_device *device, void *set_id);
-/* events for the backend driver notify callback */
-enum vfio_iommu_notify_type {
- VFIO_IOMMU_CONTAINER_CLOSE = 0,
-};
-
-/**
- * struct vfio_iommu_driver_ops - VFIO IOMMU driver callbacks
- */
-struct vfio_iommu_driver_ops {
- char *name;
- struct module *owner;
- void *(*open)(unsigned long arg);
- void (*release)(void *iommu_data);
- ssize_t (*read)(void *iommu_data, char __user *buf,
- size_t count, loff_t *ppos);
- ssize_t (*write)(void *iommu_data, const char __user *buf,
- size_t count, loff_t *size);
- long (*ioctl)(void *iommu_data, unsigned int cmd,
- unsigned long arg);
- int (*mmap)(void *iommu_data, struct vm_area_struct *vma);
- int (*attach_group)(void *iommu_data,
- struct iommu_group *group);
- void (*detach_group)(void *iommu_data,
- struct iommu_group *group);
- int (*pin_pages)(void *iommu_data,
- struct iommu_group *group,
- unsigned long *user_pfn,
- int npage, int prot,
- unsigned long *phys_pfn);
- int (*unpin_pages)(void *iommu_data,
- unsigned long *user_pfn, int npage);
- int (*register_notifier)(void *iommu_data,
- unsigned long *events,
- struct notifier_block *nb);
- int (*unregister_notifier)(void *iommu_data,
- struct notifier_block *nb);
- int (*dma_rw)(void *iommu_data, dma_addr_t user_iova,
- void *data, size_t count, bool write);
- struct iommu_domain *(*group_iommu_domain)(void *iommu_data,
- struct iommu_group *group);
- void (*notify)(void *iommu_data,
- enum vfio_iommu_notify_type event);
-};
-
-extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
-
-extern void vfio_unregister_iommu_driver(
- const struct vfio_iommu_driver_ops *ops);
-
/*
* External user API
*/
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 41edbc01ffa4..44d0e09da2d9 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -152,6 +152,7 @@ size_t virtio_max_dma_size(struct virtio_device *vdev);
* @feature_table_size: number of entries in the feature table array.
* @feature_table_legacy: same as feature_table but when working in legacy mode.
* @feature_table_size_legacy: number of entries in feature table legacy array.
+ * @suppress_used_validation: set to not have core validate used length
* @probe: the function to call when a device is found. Returns 0 or -errno.
* @scan: optional function to call after successful probe; intended
* for virtio-scsi to invoke a scan.
@@ -168,6 +169,7 @@ struct virtio_driver {
unsigned int feature_table_size;
const unsigned int *feature_table_legacy;
unsigned int feature_table_size_legacy;
+ bool suppress_used_validation;
int (*validate)(struct virtio_device *dev);
int (*probe)(struct virtio_device *dev);
void (*scan)(struct virtio_device *dev);
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 8519b3ae5d52..4d107ad31149 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -23,6 +23,8 @@ struct virtio_shm_region {
* any of @get/@set, @get_status/@set_status, or @get_features/
* @finalize_features are NOT safe to be called from an atomic
* context.
+ * @enable_cbs: enable the callbacks
+ * vdev: the virtio_device
* @get: read the value of a configuration field
* vdev: the virtio_device
* offset: the offset of the configuration field
@@ -75,6 +77,7 @@ struct virtio_shm_region {
*/
typedef void vq_callback_t(struct virtqueue *);
struct virtio_config_ops {
+ void (*enable_cbs)(struct virtio_device *vdev);
void (*get)(struct virtio_device *vdev, unsigned offset,
void *buf, unsigned len);
void (*set)(struct virtio_device *vdev, unsigned offset,
@@ -229,6 +232,9 @@ void virtio_device_ready(struct virtio_device *dev)
{
unsigned status = dev->config->get_status(dev);
+ if (dev->config->enable_cbs)
+ dev->config->enable_cbs(dev);
+
BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
}
diff --git a/include/linux/virtio_pci_legacy.h b/include/linux/virtio_pci_legacy.h
new file mode 100644
index 000000000000..e5d665faf00e
--- /dev/null
+++ b/include/linux/virtio_pci_legacy.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_VIRTIO_PCI_LEGACY_H
+#define _LINUX_VIRTIO_PCI_LEGACY_H
+
+#include "linux/mod_devicetable.h"
+#include <linux/pci.h>
+#include <linux/virtio_pci.h>
+
+struct virtio_pci_legacy_device {
+ struct pci_dev *pci_dev;
+
+ /* Where to read and clear interrupt */
+ u8 __iomem *isr;
+ /* The IO mapping for the PCI config space (legacy mode only) */
+ void __iomem *ioaddr;
+
+ struct virtio_device_id id;
+};
+
+u64 vp_legacy_get_features(struct virtio_pci_legacy_device *ldev);
+u64 vp_legacy_get_driver_features(struct virtio_pci_legacy_device *ldev);
+void vp_legacy_set_features(struct virtio_pci_legacy_device *ldev,
+ u32 features);
+u8 vp_legacy_get_status(struct virtio_pci_legacy_device *ldev);
+void vp_legacy_set_status(struct virtio_pci_legacy_device *ldev,
+ u8 status);
+u16 vp_legacy_queue_vector(struct virtio_pci_legacy_device *ldev,
+ u16 idx, u16 vector);
+u16 vp_legacy_config_vector(struct virtio_pci_legacy_device *ldev,
+ u16 vector);
+void vp_legacy_set_queue_address(struct virtio_pci_legacy_device *ldev,
+ u16 index, u32 queue_pfn);
+bool vp_legacy_get_queue_enable(struct virtio_pci_legacy_device *ldev,
+ u16 idx);
+void vp_legacy_set_queue_size(struct virtio_pci_legacy_device *ldev,
+ u16 idx, u16 size);
+u16 vp_legacy_get_queue_size(struct virtio_pci_legacy_device *ldev,
+ u16 idx);
+int vp_legacy_probe(struct virtio_pci_legacy_device *ldev);
+void vp_legacy_remove(struct virtio_pci_legacy_device *ldev);
+
+#endif
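
A sketch of the minimal bring-up these helpers enable from a PCI probe path
(real drivers embed the ldev in their device state; error handling is
trimmed):

static int sample_legacy_setup(struct pci_dev *pdev,
			       struct virtio_pci_legacy_device *ldev)
{
	int rc;

	ldev->pci_dev = pdev;
	rc = vp_legacy_probe(ldev);
	if (rc)
		return rc;

	vp_legacy_set_status(ldev, 0);	/* reset the device */
	return 0;
}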
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 671d402c3778..6e022cc712e6 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -22,7 +22,7 @@ struct notifier_block; /* in notifier.h */
#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT 0x00000010 /* dma_alloc_coherent */
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
-#define VM_NO_GUARD 0x00000040 /* don't add guard page */
+#define VM_NO_GUARD 0x00000040 /* ***DANGEROUS*** don't add guard page */
#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS 0x00000100 /* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES 0x00000200 /* put pages and free array in vfree */
@@ -136,21 +136,21 @@ static inline void vmalloc_init(void)
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif
-extern void *vmalloc(unsigned long size);
-extern void *vzalloc(unsigned long size);
-extern void *vmalloc_user(unsigned long size);
-extern void *vmalloc_node(unsigned long size, int node);
-extern void *vzalloc_node(unsigned long size, int node);
-extern void *vmalloc_32(unsigned long size);
-extern void *vmalloc_32_user(unsigned long size);
-extern void *__vmalloc(unsigned long size, gfp_t gfp_mask);
+extern void *vmalloc(unsigned long size) __alloc_size(1);
+extern void *vzalloc(unsigned long size) __alloc_size(1);
+extern void *vmalloc_user(unsigned long size) __alloc_size(1);
+extern void *vmalloc_node(unsigned long size, int node) __alloc_size(1);
+extern void *vzalloc_node(unsigned long size, int node) __alloc_size(1);
+extern void *vmalloc_32(unsigned long size) __alloc_size(1);
+extern void *vmalloc_32_user(unsigned long size) __alloc_size(1);
+extern void *__vmalloc(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
unsigned long start, unsigned long end, gfp_t gfp_mask,
pgprot_t prot, unsigned long vm_flags, int node,
- const void *caller);
+ const void *caller) __alloc_size(1);
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
- int node, const void *caller);
-void *vmalloc_no_huge(unsigned long size);
+ int node, const void *caller) __alloc_size(1);
+void *vmalloc_no_huge(unsigned long size) __alloc_size(1);
extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index d6a6cf53b127..bfe38869498d 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -415,6 +415,78 @@ static inline void drain_zonestat(struct zone *zone,
struct per_cpu_zonestat *pzstats) { }
#endif /* CONFIG_SMP */
+static inline void __zone_stat_mod_folio(struct folio *folio,
+ enum zone_stat_item item, long nr)
+{
+ __mod_zone_page_state(folio_zone(folio), item, nr);
+}
+
+static inline void __zone_stat_add_folio(struct folio *folio,
+ enum zone_stat_item item)
+{
+ __mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
+}
+
+static inline void __zone_stat_sub_folio(struct folio *folio,
+ enum zone_stat_item item)
+{
+ __mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
+}
+
+static inline void zone_stat_mod_folio(struct folio *folio,
+ enum zone_stat_item item, long nr)
+{
+ mod_zone_page_state(folio_zone(folio), item, nr);
+}
+
+static inline void zone_stat_add_folio(struct folio *folio,
+ enum zone_stat_item item)
+{
+ mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
+}
+
+static inline void zone_stat_sub_folio(struct folio *folio,
+ enum zone_stat_item item)
+{
+ mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
+}
+
+static inline void __node_stat_mod_folio(struct folio *folio,
+ enum node_stat_item item, long nr)
+{
+ __mod_node_page_state(folio_pgdat(folio), item, nr);
+}
+
+static inline void __node_stat_add_folio(struct folio *folio,
+ enum node_stat_item item)
+{
+ __mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
+}
+
+static inline void __node_stat_sub_folio(struct folio *folio,
+ enum node_stat_item item)
+{
+ __mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
+}
+
+static inline void node_stat_mod_folio(struct folio *folio,
+ enum node_stat_item item, long nr)
+{
+ mod_node_page_state(folio_pgdat(folio), item, nr);
+}
+
+static inline void node_stat_add_folio(struct folio *folio,
+ enum node_stat_item item)
+{
+ mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
+}
+
+static inline void node_stat_sub_folio(struct folio *folio,
+ enum node_stat_item item)
+{
+ mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
+}
+
static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
int migratetype)
{
@@ -525,12 +597,6 @@ static inline void mod_lruvec_page_state(struct page *page,
#endif /* CONFIG_MEMCG */
-static inline void inc_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx)
-{
- mod_lruvec_state(lruvec, idx, 1);
-}
-
static inline void __inc_lruvec_page_state(struct page *page,
enum node_stat_item idx)
{
@@ -543,6 +609,24 @@ static inline void __dec_lruvec_page_state(struct page *page,
__mod_lruvec_page_state(page, idx, -1);
}
+static inline void __lruvec_stat_mod_folio(struct folio *folio,
+ enum node_stat_item idx, int val)
+{
+ __mod_lruvec_page_state(&folio->page, idx, val);
+}
+
+static inline void __lruvec_stat_add_folio(struct folio *folio,
+ enum node_stat_item idx)
+{
+ __lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
+}
+
+static inline void __lruvec_stat_sub_folio(struct folio *folio,
+ enum node_stat_item idx)
+{
+ __lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
+}
+
static inline void inc_lruvec_page_state(struct page *page,
enum node_stat_item idx)
{
@@ -555,4 +639,21 @@ static inline void dec_lruvec_page_state(struct page *page,
mod_lruvec_page_state(page, idx, -1);
}
+static inline void lruvec_stat_mod_folio(struct folio *folio,
+ enum node_stat_item idx, int val)
+{
+ mod_lruvec_page_state(&folio->page, idx, val);
+}
+
+static inline void lruvec_stat_add_folio(struct folio *folio,
+ enum node_stat_item idx)
+{
+ lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
+}
+
+static inline void lruvec_stat_sub_folio(struct folio *folio,
+ enum node_stat_item idx)
+{
+ lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
+}
#endif /* _LINUX_VMSTAT_H */
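
The folio wrappers let call sites account a whole (possibly multi-page) folio
in one call; a one-line sketch:

static void sample_account_dirty(struct folio *folio)
{
	/* Adjusts the node counter by folio_nr_pages(folio). */
	node_stat_add_folio(folio, NR_FILE_DIRTY);
}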
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 93dab0e9580f..2d0df57c9902 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -1160,6 +1160,7 @@ int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, i
(wait)->flags = 0; \
} while (0)
-bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg);
+typedef int (*task_call_f)(struct task_struct *p, void *arg);
+extern int task_call_func(struct task_struct *p, task_call_f func, void *arg);
#endif /* _LINUX_WAIT_H */
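
A sketch of the renamed helper: func is invoked while the task is pinned and
guaranteed not to be running concurrently, so it may read otherwise-racy
state. The flags read below is purely illustrative.

static int sample_read_flags(struct task_struct *t, void *arg)
{
	*(unsigned int *)arg = t->flags;
	return 0;
}

static unsigned int sample_task_flags(struct task_struct *p)
{
	unsigned int flags = 0;

	task_call_func(p, sample_read_flags, &flags);
	return flags;
}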
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 74d3c1efd9bb..7fee9b6cfede 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -469,7 +469,8 @@ extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
-extern void show_workqueue_state(void);
+extern void show_all_workqueues(void);
+extern void show_one_workqueue(struct workqueue_struct *wq);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
/**
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index d1f65adf6a26..3bfd487d1dd2 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -11,7 +11,6 @@
#include <linux/flex_proportions.h>
#include <linux/backing-dev-defs.h>
#include <linux/blk_types.h>
-#include <linux/blk-cgroup.h>
struct bio;
@@ -109,15 +108,12 @@ static inline int wbc_to_write_flags(struct writeback_control *wbc)
return flags;
}
-static inline struct cgroup_subsys_state *
-wbc_blkcg_css(struct writeback_control *wbc)
-{
#ifdef CONFIG_CGROUP_WRITEBACK
- if (wbc->wb)
- return wbc->wb->blkcg_css;
-#endif
- return blkcg_root_css;
-}
+#define wbc_blkcg_css(wbc) \
+ ((wbc)->wb ? (wbc)->wb->blkcg_css : blkcg_root_css)
+#else
+#define wbc_blkcg_css(wbc) (blkcg_root_css)
+#endif /* CONFIG_CGROUP_WRITEBACK */
/*
* A wb_domain represents a domain that wb's (bdi_writeback's) belong to
@@ -393,7 +389,14 @@ void writeback_set_ratelimit(void);
void tag_pages_for_writeback(struct address_space *mapping,
pgoff_t start, pgoff_t end);
-void account_page_redirty(struct page *page);
+bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio);
+void folio_account_redirty(struct folio *folio);
+static inline void account_page_redirty(struct page *page)
+{
+ folio_account_redirty(page_folio(page));
+}
+bool folio_redirty_for_writepage(struct writeback_control *, struct folio *);
+bool redirty_page_for_writepage(struct writeback_control *, struct page *);
void sb_mark_inode_writeback(struct inode *inode);
void sb_clear_inode_writeback(struct inode *inode);
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 29db736af86d..bb763085479a 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -28,12 +28,10 @@
#ifndef CONFIG_PREEMPT_RT
#define WW_MUTEX_BASE mutex
#define ww_mutex_base_init(l,n,k) __mutex_init(l,n,k)
-#define ww_mutex_base_trylock(l) mutex_trylock(l)
#define ww_mutex_base_is_locked(b) mutex_is_locked((b))
#else
#define WW_MUTEX_BASE rt_mutex
#define ww_mutex_base_init(l,n,k) __rt_mutex_init(l,n,k)
-#define ww_mutex_base_trylock(l) rt_mutex_trylock(l)
#define ww_mutex_base_is_locked(b) rt_mutex_base_is_locked(&(b)->rtmutex)
#endif
@@ -339,17 +337,8 @@ ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
extern void ww_mutex_unlock(struct ww_mutex *lock);
-/**
- * ww_mutex_trylock - tries to acquire the w/w mutex without acquire context
- * @lock: mutex to lock
- *
- * Trylocks a mutex without acquire context, so no deadlock detection is
- * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
- */
-static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock)
-{
- return ww_mutex_base_trylock(&lock->base);
-}
+extern int __must_check ww_mutex_trylock(struct ww_mutex *lock,
+ struct ww_acquire_ctx *ctx);
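[Editor's note: a sketch of the new trylock-with-context form; the lock and the
error handling are assumptions. Passing a ctx lets the trylock participate in
wound/wait deadlock avoidance, which the removed context-less inline could not.]

	#include <linux/errno.h>
	#include <linux/ww_mutex.h>

	static int example_try_evict(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
	{
		/* ctx may be NULL for the old context-less behaviour. */
		if (!ww_mutex_trylock(lock, ctx))
			return -EBUSY;
		/* ... use the protected object ... */
		ww_mutex_unlock(lock);
		return 0;
	}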
/***
* ww_mutex_destroy - mark a w/w mutex unusable
diff --git a/include/linux/xz.h b/include/linux/xz.h
index 9884c8440188..7285ca5d56e9 100644
--- a/include/linux/xz.h
+++ b/include/linux/xz.h
@@ -234,6 +234,112 @@ XZ_EXTERN void xz_dec_reset(struct xz_dec *s);
XZ_EXTERN void xz_dec_end(struct xz_dec *s);
/*
+ * Decompressor for MicroLZMA, an LZMA variant with a very minimal header.
+ * See xz_dec_microlzma_alloc() below for details.
+ *
+ * These functions aren't used or available in preboot code and thus aren't
+ * marked with XZ_EXTERN. This avoids warnings about static functions that
+ * are never defined.
+ */
+/**
+ * struct xz_dec_microlzma - Opaque type to hold the MicroLZMA decoder state
+ */
+struct xz_dec_microlzma;
+
+/**
+ * xz_dec_microlzma_alloc() - Allocate memory for the MicroLZMA decoder
+ * @mode:       XZ_SINGLE or XZ_PREALLOC
+ * @dict_size:  LZMA dictionary size. This must be at least 4 KiB and
+ *              at most 3 GiB.
+ *
+ * In contrast to xz_dec_init(), this function only allocates the memory
+ * and remembers the dictionary size. xz_dec_microlzma_reset() must be used
+ * before calling xz_dec_microlzma_run().
+ *
+ * The amount of allocated memory is a little less than 30 KiB with XZ_SINGLE.
+ * With XZ_PREALLOC, a dictionary buffer of dict_size bytes is also allocated.
+ *
+ * On success, xz_dec_microlzma_alloc() returns a pointer to
+ * struct xz_dec_microlzma. If memory allocation fails or
+ * dict_size is invalid, NULL is returned.
+ *
+ * The compressed format supported by this decoder is a raw LZMA stream
+ * whose first byte (always 0x00) has been replaced with bitwise-negation
+ * of the LZMA properties (lc/lp/pb) byte. For example, if lc/lp/pb is
+ * 3/0/2, the first byte is 0xA2. This way the first byte can never be 0x00.
+ * Just like with LZMA2, lc + lp <= 4 must be true. The LZMA end-of-stream
+ * marker must not be used. The unused values are reserved for future use.
+ * This MicroLZMA header format was created for use in EROFS but may be used
+ * by others too.
+ */
+extern struct xz_dec_microlzma *xz_dec_microlzma_alloc(enum xz_mode mode,
+ uint32_t dict_size);
+
+/**
+ * xz_dec_microlzma_reset() - Reset the MicroLZMA decoder state
+ * @s:          Decoder state allocated using xz_dec_microlzma_alloc()
+ * @comp_size:  Compressed size of the input stream
+ * @uncomp_size: Uncompressed size of the input stream. A value smaller
+ *              than the real uncompressed size of the input stream can
+ *              be specified if uncomp_size_is_exact is set to false.
+ *              uncomp_size can never be set to a value larger than the
+ *              expected real uncompressed size because it would eventually
+ *              result in XZ_DATA_ERROR.
+ * @uncomp_size_is_exact: This is an int instead of bool to avoid
+ *              requiring stdbool.h. This should normally be set to true.
+ *              When this is set to false, error detection is weaker.
+ */
+extern void xz_dec_microlzma_reset(struct xz_dec_microlzma *s,
+ uint32_t comp_size, uint32_t uncomp_size,
+ int uncomp_size_is_exact);
+
+/**
+ * xz_dec_microlzma_run() - Run the MicroLZMA decoder
+ * @s: Decoder state initialized using xz_dec_microlzma_reset()
+ * @b: Input and output buffers
+ *
+ * This works similarly to xz_dec_run() with a few important differences.
+ * Only the differences are documented here.
+ *
+ * The only possible return values are XZ_OK, XZ_STREAM_END, and
+ * XZ_DATA_ERROR. This function cannot return XZ_BUF_ERROR: if no progress
+ * is possible due to lack of input data or output space, this function will
+ * keep returning XZ_OK. Thus, the calling code must be written so that it
+ * will eventually provide input and output space matching (or exceeding) the
+ * comp_size and uncomp_size arguments given to xz_dec_microlzma_reset().
+ * If the caller cannot do this (for example, if the input file is truncated
+ * or otherwise corrupt), the caller must detect this error by itself to
+ * avoid an infinite loop.
+ *
+ * If the compressed data seems to be corrupt, XZ_DATA_ERROR is returned.
+ * This can also happen when incorrect dictionary, uncompressed, or
+ * compressed sizes have been specified.
+ *
+ * With XZ_PREALLOC only: As an extra feature, b->out may be NULL to skip over
+ * uncompressed data. This way the caller doesn't need to provide a temporary
+ * output buffer for the bytes that will be ignored.
+ *
+ * With XZ_SINGLE only: In contrast to xz_dec_run(), the return value XZ_OK
+ * is also possible and thus XZ_SINGLE is actually a limited multi-call mode.
+ * After XZ_OK the bytes decoded so far may be read from the output buffer.
+ * It is possible to continue decoding but the variables b->out and b->out_pos
+ * MUST NOT be changed by the caller. Increasing the value of b->out_size is
+ * allowed to make more output space available; one doesn't need to provide
+ * space for the whole uncompressed data on the first call. The input buffer
+ * may be changed normally like with XZ_PREALLOC. This way input data can be
+ * provided from non-contiguous memory.
+ */
+extern enum xz_ret xz_dec_microlzma_run(struct xz_dec_microlzma *s,
+ struct xz_buf *b);
+
+/**
+ * xz_dec_microlzma_end() - Free the memory allocated for the decoder state
+ * @s: Decoder state allocated using xz_dec_microlzma_alloc().
+ * If s is NULL, this function does nothing.
+ */
+extern void xz_dec_microlzma_end(struct xz_dec_microlzma *s);
+
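[Editor's note: a hedged single-call (XZ_SINGLE) decode sketch tying the four
functions above together; the 64 KiB dictionary size and the errno mapping are
assumptions.]

	#include <linux/errno.h>
	#include <linux/xz.h>

	static int example_microlzma_decode(const u8 *in, size_t in_size,
					    u8 *out, size_t out_size)
	{
		struct xz_dec_microlzma *s;
		struct xz_buf b = {
			.in = in, .in_size = in_size, .in_pos = 0,
			.out = out, .out_size = out_size, .out_pos = 0,
		};
		enum xz_ret ret;

		s = xz_dec_microlzma_alloc(XZ_SINGLE, 64 * 1024);
		if (s == NULL)
			return -ENOMEM;

		/* Sizes are exact here; pass 0 as the last argument otherwise. */
		xz_dec_microlzma_reset(s, in_size, out_size, 1);
		ret = xz_dec_microlzma_run(s, &b);
		xz_dec_microlzma_end(s);

		return ret == XZ_STREAM_END ? 0 : -EINVAL;
	}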
+/*
* Standalone build (userspace build or in-kernel build for boot time use)
* needs a CRC32 implementation. For normal in-kernel use, kernel's own
* CRC32 module is used instead, and users of this module don't need to
diff --git a/include/linux/zstd.h b/include/linux/zstd.h
index e87f78c9b19c..113408eef6ec 100644
--- a/include/linux/zstd.h
+++ b/include/linux/zstd.h
@@ -1,138 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of https://github.com/facebook/zstd.
- * An additional grant of patent rights can be found in the PATENTS file in the
- * same directory.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of https://github.com/facebook/zstd) and
+ * the GPLv2 (found in the COPYING file in the root directory of
+ * https://github.com/facebook/zstd). You may select, at your option, one of the
+ * above-listed licenses.
*/
-#ifndef ZSTD_H
-#define ZSTD_H
+#ifndef LINUX_ZSTD_H
+#define LINUX_ZSTD_H
-/* ====== Dependency ======*/
-#include <linux/types.h> /* size_t */
+/**
+ * This is a kernel-style API that wraps the upstream zstd API, which cannot be
+ * used directly because the symbols aren't exported. It exposes the minimal
+ * functionality which is currently required by users of zstd in the kernel.
+ * Expose extra functions from lib/zstd/zstd.h as needed.
+ */
+/* ====== Dependency ====== */
+#include <linux/types.h>
+#include <linux/zstd_errors.h>
+#include <linux/zstd_lib.h>
-/*-*****************************************************************************
- * Introduction
+/* ====== Helper Functions ====== */
+/**
+ * zstd_compress_bound() - maximum compressed size in worst case scenario
+ * @src_size: The size of the data to compress.
*
- * zstd, short for Zstandard, is a fast lossless compression algorithm,
- * targeting real-time compression scenarios at zlib-level and better
- * compression ratios. The zstd compression library provides in-memory
- * compression and decompression functions. The library supports compression
- * levels from 1 up to ZSTD_maxCLevel() which is 22. Levels >= 20, labeled
- * ultra, should be used with caution, as they require more memory.
- * Compression can be done in:
- * - a single step, reusing a context (described as Explicit memory management)
- * - unbounded multiple steps (described as Streaming compression)
- * The compression ratio achievable on small data can be highly improved using
- * compression with a dictionary in:
- * - a single step (described as Simple dictionary API)
- * - a single step, reusing a dictionary (described as Fast dictionary API)
- ******************************************************************************/
-
-/*====== Helper functions ======*/
+ * Return: The maximum compressed size in the worst case scenario.
+ */
+size_t zstd_compress_bound(size_t src_size);
/**
- * enum ZSTD_ErrorCode - zstd error codes
+ * zstd_is_error() - tells if a size_t function result is an error code
+ * @code: The function result to check for error.
*
- * Functions that return size_t can be checked for errors using ZSTD_isError()
- * and the ZSTD_ErrorCode can be extracted using ZSTD_getErrorCode().
+ * Return: Non-zero iff the code is an error.
+ */
+unsigned int zstd_is_error(size_t code);
+
+/**
+ * enum zstd_error_code - zstd error codes
*/
-typedef enum {
- ZSTD_error_no_error,
- ZSTD_error_GENERIC,
- ZSTD_error_prefix_unknown,
- ZSTD_error_version_unsupported,
- ZSTD_error_parameter_unknown,
- ZSTD_error_frameParameter_unsupported,
- ZSTD_error_frameParameter_unsupportedBy32bits,
- ZSTD_error_frameParameter_windowTooLarge,
- ZSTD_error_compressionParameter_unsupported,
- ZSTD_error_init_missing,
- ZSTD_error_memory_allocation,
- ZSTD_error_stage_wrong,
- ZSTD_error_dstSize_tooSmall,
- ZSTD_error_srcSize_wrong,
- ZSTD_error_corruption_detected,
- ZSTD_error_checksum_wrong,
- ZSTD_error_tableLog_tooLarge,
- ZSTD_error_maxSymbolValue_tooLarge,
- ZSTD_error_maxSymbolValue_tooSmall,
- ZSTD_error_dictionary_corrupted,
- ZSTD_error_dictionary_wrong,
- ZSTD_error_dictionaryCreation_failed,
- ZSTD_error_maxCode
-} ZSTD_ErrorCode;
+typedef ZSTD_ErrorCode zstd_error_code;
/**
- * ZSTD_maxCLevel() - maximum compression level available
+ * zstd_get_error_code() - translates an error function result to an error code
+ * @code: The function result for which zstd_is_error(code) is true.
*
- * Return: Maximum compression level available.
+ * Return: A unique error code for this error.
*/
-int ZSTD_maxCLevel(void);
+zstd_error_code zstd_get_error_code(size_t code);
+
/**
- * ZSTD_compressBound() - maximum compressed size in worst case scenario
- * @srcSize: The size of the data to compress.
+ * zstd_get_error_name() - translates an error function result to a string
+ * @code: The function result for which zstd_is_error(code) is true.
*
- * Return: The maximum compressed size in the worst case scenario.
+ * Return: An error string corresponding to the error code.
*/
-size_t ZSTD_compressBound(size_t srcSize);
+const char *zstd_get_error_name(size_t code);
+
/**
- * ZSTD_isError() - tells if a size_t function result is an error code
- * @code: The function result to check for error.
+ * zstd_min_clevel() - minimum allowed compression level
*
- * Return: Non-zero iff the code is an error.
+ * Return: The minimum allowed compression level.
*/
-static __attribute__((unused)) unsigned int ZSTD_isError(size_t code)
-{
- return code > (size_t)-ZSTD_error_maxCode;
-}
+int zstd_min_clevel(void);
+
/**
- * ZSTD_getErrorCode() - translates an error function result to a ZSTD_ErrorCode
- * @functionResult: The result of a function for which ZSTD_isError() is true.
+ * zstd_max_clevel() - maximum allowed compression level
*
- * Return: The ZSTD_ErrorCode corresponding to the functionResult or 0
- * if the functionResult isn't an error.
+ * Return: The maximum allowed compression level.
*/
-static __attribute__((unused)) ZSTD_ErrorCode ZSTD_getErrorCode(
- size_t functionResult)
-{
- if (!ZSTD_isError(functionResult))
- return (ZSTD_ErrorCode)0;
- return (ZSTD_ErrorCode)(0 - functionResult);
-}
+int zstd_max_clevel(void);
+
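[Editor's note: a small sketch of the intended error-handling pattern for the
helpers above; the log prefix and the errno mapping are assumptions.]

	#include <linux/errno.h>
	#include <linux/printk.h>
	#include <linux/zstd.h>

	/* Returns 0 on success, logging a readable error name otherwise. */
	static int example_check(size_t ret)
	{
		if (zstd_is_error(ret)) {
			pr_err("zstd: %s\n", zstd_get_error_name(ret));
			return -EINVAL;
		}
		return 0;
	}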
+/* ====== Parameter Selection ====== */
/**
- * enum ZSTD_strategy - zstd compression search strategy
+ * enum zstd_strategy - zstd compression search strategy
*
- * From faster to stronger.
+ * From faster to stronger. See zstd_lib.h.
*/
-typedef enum {
- ZSTD_fast,
- ZSTD_dfast,
- ZSTD_greedy,
- ZSTD_lazy,
- ZSTD_lazy2,
- ZSTD_btlazy2,
- ZSTD_btopt,
- ZSTD_btopt2
-} ZSTD_strategy;
+typedef ZSTD_strategy zstd_strategy;
/**
- * struct ZSTD_compressionParameters - zstd compression parameters
+ * struct zstd_compression_parameters - zstd compression parameters
* @windowLog: Log of the largest match distance. Larger means more
* compression, and more memory needed during decompression.
- * @chainLog: Fully searched segment. Larger means more compression, slower,
- * and more memory (useless for fast).
+ * @chainLog: Fully searched segment. Larger means more compression,
+ * slower, and more memory (useless for fast).
* @hashLog: Dispatch table. Larger means more compression,
* slower, and more memory.
* @searchLog: Number of searches. Larger means more compression and slower.
@@ -141,1017 +100,348 @@ typedef enum {
* @targetLength: Acceptable match size for optimal parser (only). Larger means
* more compression, and slower.
* @strategy: The zstd compression strategy.
+ *
+ * See zstd_lib.h.
*/
-typedef struct {
- unsigned int windowLog;
- unsigned int chainLog;
- unsigned int hashLog;
- unsigned int searchLog;
- unsigned int searchLength;
- unsigned int targetLength;
- ZSTD_strategy strategy;
-} ZSTD_compressionParameters;
+typedef ZSTD_compressionParameters zstd_compression_parameters;
/**
- * struct ZSTD_frameParameters - zstd frame parameters
- * @contentSizeFlag: Controls whether content size will be present in the frame
- * header (when known).
- * @checksumFlag: Controls whether a 32-bit checksum is generated at the end
- * of the frame for error detection.
- * @noDictIDFlag: Controls whether dictID will be saved into the frame header
- * when using dictionary compression.
+ * struct zstd_frame_parameters - zstd frame parameters
+ * @contentSizeFlag: Controls whether content size will be present in the
+ * frame header (when known).
+ * @checksumFlag: Controls whether a 32-bit checksum is generated at the
+ * end of the frame for error detection.
+ * @noDictIDFlag: Controls whether dictID will be saved into the frame
+ * header when using dictionary compression.
*
- * The default value is all fields set to 0.
+ * The default value is all fields set to 0. See zstd_lib.h.
*/
-typedef struct {
- unsigned int contentSizeFlag;
- unsigned int checksumFlag;
- unsigned int noDictIDFlag;
-} ZSTD_frameParameters;
+typedef ZSTD_frameParameters zstd_frame_parameters;
/**
- * struct ZSTD_parameters - zstd parameters
+ * struct zstd_parameters - zstd parameters
* @cParams: The compression parameters.
* @fParams: The frame parameters.
*/
-typedef struct {
- ZSTD_compressionParameters cParams;
- ZSTD_frameParameters fParams;
-} ZSTD_parameters;
+typedef ZSTD_parameters zstd_parameters;
/**
- * ZSTD_getCParams() - returns ZSTD_compressionParameters for selected level
- * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel().
- * @estimatedSrcSize: The estimated source size to compress or 0 if unknown.
- * @dictSize: The dictionary size or 0 if a dictionary isn't being used.
+ * zstd_get_params() - returns zstd_parameters for selected level
+ * @level: The compression level
+ * @estimated_src_size: The estimated source size to compress or 0
+ * if unknown.
*
- * Return: The selected ZSTD_compressionParameters.
+ * Return: The selected zstd_parameters.
*/
-ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel,
- unsigned long long estimatedSrcSize, size_t dictSize);
+zstd_parameters zstd_get_params(int level,
+ unsigned long long estimated_src_size);
-/**
- * ZSTD_getParams() - returns ZSTD_parameters for selected level
- * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel().
- * @estimatedSrcSize: The estimated source size to compress or 0 if unknown.
- * @dictSize: The dictionary size or 0 if a dictionary isn't being used.
- *
- * The same as ZSTD_getCParams() except also selects the default frame
- * parameters (all zero).
- *
- * Return: The selected ZSTD_parameters.
- */
-ZSTD_parameters ZSTD_getParams(int compressionLevel,
- unsigned long long estimatedSrcSize, size_t dictSize);
+/* ====== Single-pass Compression ====== */
-/*-*************************************
- * Explicit memory management
- **************************************/
+typedef ZSTD_CCtx zstd_cctx;
/**
- * ZSTD_CCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_CCtx
- * @cParams: The compression parameters to be used for compression.
+ * zstd_cctx_workspace_bound() - max memory needed to initialize a zstd_cctx
+ * @parameters: The compression parameters to be used.
*
* If multiple compression parameters might be used, the caller must call
- * ZSTD_CCtxWorkspaceBound() for each set of parameters and use the maximum
+ * zstd_cctx_workspace_bound() for each set of parameters and use the maximum
* size.
*
- * Return: A lower bound on the size of the workspace that is passed to
- * ZSTD_initCCtx().
+ * Return: A lower bound on the size of the workspace that is passed to
+ * zstd_init_cctx().
*/
-size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams);
+size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *parameters);
/**
- * struct ZSTD_CCtx - the zstd compression context
- *
- * When compressing many times it is recommended to allocate a context just once
- * and reuse it for each successive compression operation.
- */
-typedef struct ZSTD_CCtx_s ZSTD_CCtx;
-/**
- * ZSTD_initCCtx() - initialize a zstd compression context
- * @workspace: The workspace to emplace the context into. It must outlive
- * the returned context.
- * @workspaceSize: The size of workspace. Use ZSTD_CCtxWorkspaceBound() to
- * determine how large the workspace must be.
- *
- * Return: A compression context emplaced into workspace.
- */
-ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize);
-
-/**
- * ZSTD_compressCCtx() - compress src into dst
- * @ctx: The context. Must have been initialized with a workspace at
- * least as large as ZSTD_CCtxWorkspaceBound(params.cParams).
- * @dst: The buffer to compress src into.
- * @dstCapacity: The size of the destination buffer. May be any size, but
- * ZSTD_compressBound(srcSize) is guaranteed to be large enough.
- * @src: The data to compress.
- * @srcSize: The size of the data to compress.
- * @params: The parameters to use for compression. See ZSTD_getParams().
- *
- * Return: The compressed size or an error, which can be checked using
- * ZSTD_isError().
- */
-size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize, ZSTD_parameters params);
-
-/**
- * ZSTD_DCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_DCtx
- *
- * Return: A lower bound on the size of the workspace that is passed to
- * ZSTD_initDCtx().
- */
-size_t ZSTD_DCtxWorkspaceBound(void);
-
-/**
- * struct ZSTD_DCtx - the zstd decompression context
- *
- * When decompressing many times it is recommended to allocate a context just
- * once and reuse it for each successive decompression operation.
- */
-typedef struct ZSTD_DCtx_s ZSTD_DCtx;
-/**
- * ZSTD_initDCtx() - initialize a zstd decompression context
- * @workspace: The workspace to emplace the context into. It must outlive
- * the returned context.
- * @workspaceSize: The size of workspace. Use ZSTD_DCtxWorkspaceBound() to
- * determine how large the workspace must be.
- *
- * Return: A decompression context emplaced into workspace.
- */
-ZSTD_DCtx *ZSTD_initDCtx(void *workspace, size_t workspaceSize);
-
-/**
- * ZSTD_decompressDCtx() - decompress zstd compressed src into dst
- * @ctx: The decompression context.
- * @dst: The buffer to decompress src into.
- * @dstCapacity: The size of the destination buffer. Must be at least as large
- * as the decompressed size. If the caller cannot upper bound the
- * decompressed size, then it's better to use the streaming API.
- * @src: The zstd compressed data to decompress. Multiple concatenated
- * frames and skippable frames are allowed.
- * @srcSize: The exact size of the data to decompress.
- *
- * Return: The decompressed size or an error, which can be checked using
- * ZSTD_isError().
- */
-size_t ZSTD_decompressDCtx(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize);
-
-/*-************************
- * Simple dictionary API
- **************************/
-
-/**
- * ZSTD_compress_usingDict() - compress src into dst using a dictionary
- * @ctx: The context. Must have been initialized with a workspace at
- * least as large as ZSTD_CCtxWorkspaceBound(params.cParams).
- * @dst: The buffer to compress src into.
- * @dstCapacity: The size of the destination buffer. May be any size, but
- * ZSTD_compressBound(srcSize) is guaranteed to be large enough.
- * @src: The data to compress.
- * @srcSize: The size of the data to compress.
- * @dict: The dictionary to use for compression.
- * @dictSize: The size of the dictionary.
- * @params: The parameters to use for compression. See ZSTD_getParams().
- *
- * Compression using a predefined dictionary. The same dictionary must be used
- * during decompression.
- *
- * Return: The compressed size or an error, which can be checked using
- * ZSTD_isError().
- */
-size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize, const void *dict, size_t dictSize,
- ZSTD_parameters params);
-
-/**
- * ZSTD_decompress_usingDict() - decompress src into dst using a dictionary
- * @ctx: The decompression context.
- * @dst: The buffer to decompress src into.
- * @dstCapacity: The size of the destination buffer. Must be at least as large
- * as the decompressed size. If the caller cannot upper bound the
- * decompressed size, then it's better to use the streaming API.
- * @src: The zstd compressed data to decompress. Multiple concatenated
- * frames and skippable frames are allowed.
- * @srcSize: The exact size of the data to decompress.
- * @dict: The dictionary to use for decompression. The same dictionary
- * must've been used to compress the data.
- * @dictSize: The size of the dictionary.
- *
- * Return: The decompressed size or an error, which can be checked using
- * ZSTD_isError().
- */
-size_t ZSTD_decompress_usingDict(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize, const void *dict, size_t dictSize);
-
-/*-**************************
- * Fast dictionary API
- ***************************/
-
-/**
- * ZSTD_CDictWorkspaceBound() - memory needed to initialize a ZSTD_CDict
- * @cParams: The compression parameters to be used for compression.
+ * zstd_init_cctx() - initialize a zstd compression context
+ * @workspace: The workspace to emplace the context into. It must outlive
+ * the returned context.
+ * @workspace_size: The size of workspace. Use zstd_cctx_workspace_bound() to
+ * determine how large the workspace must be.
*
- * Return: A lower bound on the size of the workspace that is passed to
- * ZSTD_initCDict().
- */
-size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams);
-
-/**
- * struct ZSTD_CDict - a digested dictionary to be used for compression
+ * Return: A zstd compression context or NULL on error.
*/
-typedef struct ZSTD_CDict_s ZSTD_CDict;
+zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size);
/**
- * ZSTD_initCDict() - initialize a digested dictionary for compression
- * @dictBuffer: The dictionary to digest. The buffer is referenced by the
- * ZSTD_CDict so it must outlive the returned ZSTD_CDict.
- * @dictSize: The size of the dictionary.
- * @params: The parameters to use for compression. See ZSTD_getParams().
- * @workspace: The workspace. It must outlive the returned ZSTD_CDict.
- * @workspaceSize: The workspace size. Must be at least
- * ZSTD_CDictWorkspaceBound(params.cParams).
+ * zstd_compress_cctx() - compress src into dst with the initialized parameters
+ * @cctx: The context. Must have been initialized with zstd_init_cctx().
+ * @dst: The buffer to compress src into.
+ * @dst_capacity: The size of the destination buffer. May be any size, but
+ *                zstd_compress_bound(src_size) is guaranteed to be large enough.
+ * @src: The data to compress.
+ * @src_size: The size of the data to compress.
+ * @parameters: The compression parameters to be used.
*
- * When compressing multiple messages / blocks with the same dictionary it is
- * recommended to load it just once. The ZSTD_CDict merely references the
- * dictBuffer, so it must outlive the returned ZSTD_CDict.
- *
- * Return: The digested dictionary emplaced into workspace.
+ * Return: The compressed size or an error, which can be checked using
+ * zstd_is_error().
*/
-ZSTD_CDict *ZSTD_initCDict(const void *dictBuffer, size_t dictSize,
- ZSTD_parameters params, void *workspace, size_t workspaceSize);
+size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
+ const void *src, size_t src_size, const zstd_parameters *parameters);
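[Editor's note: a hedged end-to-end sketch of the single-pass compression path
declared above; the compression level, allocator, and errno mapping are
assumptions.]

	#include <linux/errno.h>
	#include <linux/mm.h>
	#include <linux/zstd.h>

	static ssize_t example_compress(void *dst, size_t dst_capacity,
					const void *src, size_t src_size)
	{
		zstd_parameters params = zstd_get_params(3, src_size);
		size_t wksp_size = zstd_cctx_workspace_bound(&params.cParams);
		void *wksp = kvmalloc(wksp_size, GFP_KERNEL);
		zstd_cctx *cctx;
		size_t ret;

		if (!wksp)
			return -ENOMEM;
		cctx = zstd_init_cctx(wksp, wksp_size);
		if (!cctx) {
			kvfree(wksp);
			return -EINVAL;
		}
		ret = zstd_compress_cctx(cctx, dst, dst_capacity,
					 src, src_size, &params);
		kvfree(wksp);
		return zstd_is_error(ret) ? -EINVAL : ret;
	}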
-/**
- * ZSTD_compress_usingCDict() - compress src into dst using a ZSTD_CDict
- * @ctx: The context. Must have been initialized with a workspace at
- * least as large as ZSTD_CCtxWorkspaceBound(cParams) where
- * cParams are the compression parameters used to initialize the
- * cdict.
- * @dst: The buffer to compress src into.
- * @dstCapacity: The size of the destination buffer. May be any size, but
- * ZSTD_compressBound(srcSize) is guaranteed to be large enough.
- * @src: The data to compress.
- * @srcSize: The size of the data to compress.
- * @cdict: The digested dictionary to use for compression.
- * @params: The parameters to use for compression. See ZSTD_getParams().
- *
- * Compression using a digested dictionary. The same dictionary must be used
- * during decompression.
- *
- * Return: The compressed size or an error, which can be checked using
- * ZSTD_isError().
- */
-size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize, const ZSTD_CDict *cdict);
+/* ====== Single-pass Decompression ====== */
+typedef ZSTD_DCtx zstd_dctx;
/**
- * ZSTD_DDictWorkspaceBound() - memory needed to initialize a ZSTD_DDict
+ * zstd_dctx_workspace_bound() - max memory needed to initialize a zstd_dctx
*
- * Return: A lower bound on the size of the workspace that is passed to
- * ZSTD_initDDict().
- */
-size_t ZSTD_DDictWorkspaceBound(void);
-
-/**
- * struct ZSTD_DDict - a digested dictionary to be used for decompression
+ * Return: A lower bound on the size of the workspace that is passed to
+ * zstd_init_dctx().
*/
-typedef struct ZSTD_DDict_s ZSTD_DDict;
+size_t zstd_dctx_workspace_bound(void);
/**
- * ZSTD_initDDict() - initialize a digested dictionary for decompression
- * @dictBuffer: The dictionary to digest. The buffer is referenced by the
- * ZSTD_DDict so it must outlive the returned ZSTD_DDict.
- * @dictSize: The size of the dictionary.
- * @workspace: The workspace. It must outlive the returned ZSTD_DDict.
- * @workspaceSize: The workspace size. Must be at least
- * ZSTD_DDictWorkspaceBound().
- *
- * When decompressing multiple messages / blocks with the same dictionary it is
- * recommended to load it just once. The ZSTD_DDict merely references the
- * dictBuffer, so it must outlive the returned ZSTD_DDict.
+ * zstd_init_dctx() - initialize a zstd decompression context
+ * @workspace: The workspace to emplace the context into. It must outlive
+ * the returned context.
+ * @workspace_size: The size of workspace. Use zstd_dctx_workspace_bound() to
+ * determine how large the workspace must be.
*
- * Return: The digested dictionary emplaced into workspace.
+ * Return: A zstd decompression context or NULL on error.
*/
-ZSTD_DDict *ZSTD_initDDict(const void *dictBuffer, size_t dictSize,
- void *workspace, size_t workspaceSize);
+zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size);
/**
- * ZSTD_decompress_usingDDict() - decompress src into dst using a ZSTD_DDict
- * @ctx: The decompression context.
- * @dst: The buffer to decompress src into.
- * @dstCapacity: The size of the destination buffer. Must be at least as large
- * as the decompressed size. If the caller cannot upper bound the
- * decompressed size, then it's better to use the streaming API.
- * @src: The zstd compressed data to decompress. Multiple concatenated
- * frames and skippable frames are allowed.
- * @srcSize: The exact size of the data to decompress.
- * @ddict: The digested dictionary to use for decompression. The same
- * dictionary must've been used to compress the data.
+ * zstd_decompress_dctx() - decompress zstd compressed src into dst
+ * @dctx: The decompression context.
+ * @dst: The buffer to decompress src into.
+ * @dst_capacity: The size of the destination buffer. Must be at least as large
+ * as the decompressed size. If the caller cannot upper bound the
+ * decompressed size, then it's better to use the streaming API.
+ * @src: The zstd compressed data to decompress. Multiple concatenated
+ * frames and skippable frames are allowed.
+ * @src_size: The exact size of the data to decompress.
*
- * Return: The decompressed size or an error, which can be checked using
- * ZSTD_isError().
+ * Return: The decompressed size or an error, which can be checked using
+ * zstd_is_error().
*/
-size_t ZSTD_decompress_usingDDict(ZSTD_DCtx *dctx, void *dst,
- size_t dstCapacity, const void *src, size_t srcSize,
- const ZSTD_DDict *ddict);
+size_t zstd_decompress_dctx(zstd_dctx *dctx, void *dst, size_t dst_capacity,
+ const void *src, size_t src_size);
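[Editor's note: the matching decompression sketch, under the same assumptions
as the compression example above; dst_capacity must bound the decompressed
size.]

	#include <linux/errno.h>
	#include <linux/mm.h>
	#include <linux/zstd.h>

	static ssize_t example_decompress(void *dst, size_t dst_capacity,
					  const void *src, size_t src_size)
	{
		size_t wksp_size = zstd_dctx_workspace_bound();
		void *wksp = kvmalloc(wksp_size, GFP_KERNEL);
		zstd_dctx *dctx;
		size_t ret;

		if (!wksp)
			return -ENOMEM;
		dctx = zstd_init_dctx(wksp, wksp_size);
		if (!dctx) {
			kvfree(wksp);
			return -EINVAL;
		}
		ret = zstd_decompress_dctx(dctx, dst, dst_capacity, src, src_size);
		kvfree(wksp);
		return zstd_is_error(ret) ? -EINVAL : ret;
	}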
-
-/*-**************************
- * Streaming
- ***************************/
+/* ====== Streaming Buffers ====== */
/**
- * struct ZSTD_inBuffer - input buffer for streaming
+ * struct zstd_in_buffer - input buffer for streaming
* @src: Start of the input buffer.
* @size: Size of the input buffer.
* @pos: Position where reading stopped. Will be updated.
* Necessarily 0 <= pos <= size.
+ *
+ * See zstd_lib.h.
*/
-typedef struct ZSTD_inBuffer_s {
- const void *src;
- size_t size;
- size_t pos;
-} ZSTD_inBuffer;
+typedef ZSTD_inBuffer zstd_in_buffer;
/**
- * struct ZSTD_outBuffer - output buffer for streaming
+ * struct zstd_out_buffer - output buffer for streaming
* @dst: Start of the output buffer.
* @size: Size of the output buffer.
* @pos: Position where writing stopped. Will be updated.
* Necessarily 0 <= pos <= size.
+ *
+ * See zstd_lib.h.
*/
-typedef struct ZSTD_outBuffer_s {
- void *dst;
- size_t size;
- size_t pos;
-} ZSTD_outBuffer;
+typedef ZSTD_outBuffer zstd_out_buffer;
+/* ====== Streaming Compression ====== */
-
-/*-*****************************************************************************
- * Streaming compression - HowTo
- *
- * A ZSTD_CStream object is required to track streaming operation.
- * Use ZSTD_initCStream() to initialize a ZSTD_CStream object.
- * ZSTD_CStream objects can be reused multiple times on consecutive compression
- * operations. It is recommended to re-use ZSTD_CStream in situations where many
- * streaming operations will be achieved consecutively. Use one separate
- * ZSTD_CStream per thread for parallel execution.
- *
- * Use ZSTD_compressStream() repetitively to consume input stream.
- * The function will automatically update both `pos` fields.
- * Note that it may not consume the entire input, in which case `pos < size`,
- * and it's up to the caller to present again remaining data.
- * It returns a hint for the preferred number of bytes to use as an input for
- * the next function call.
- *
- * At any moment, it's possible to flush whatever data remains within internal
- * buffer, using ZSTD_flushStream(). `output->pos` will be updated. There might
- * still be some content left within the internal buffer if `output->size` is
- * too small. It returns the number of bytes left in the internal buffer and
- * must be called until it returns 0.
- *
- * ZSTD_endStream() instructs to finish a frame. It will perform a flush and
- * write frame epilogue. The epilogue is required for decoders to consider a
- * frame completed. Similar to ZSTD_flushStream(), it may not be able to flush
- * the full content if `output->size` is too small. In which case, call again
- * ZSTD_endStream() to complete the flush. It returns the number of bytes left
- * in the internal buffer and must be called until it returns 0.
- ******************************************************************************/
+typedef ZSTD_CStream zstd_cstream;
/**
- * ZSTD_CStreamWorkspaceBound() - memory needed to initialize a ZSTD_CStream
- * @cParams: The compression parameters to be used for compression.
+ * zstd_cstream_workspace_bound() - memory needed to initialize a zstd_cstream
+ * @cparams: The compression parameters to be used for compression.
*
* Return: A lower bound on the size of the workspace that is passed to
- * ZSTD_initCStream() and ZSTD_initCStream_usingCDict().
- */
-size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams);
-
-/**
- * struct ZSTD_CStream - the zstd streaming compression context
- */
-typedef struct ZSTD_CStream_s ZSTD_CStream;
-
-/*===== ZSTD_CStream management functions =====*/
-/**
- * ZSTD_initCStream() - initialize a zstd streaming compression context
- * @params: The zstd compression parameters.
- * @pledgedSrcSize: If params.fParams.contentSizeFlag == 1 then the caller must
- * pass the source size (zero means empty source). Otherwise,
- * the caller may optionally pass the source size, or zero if
- * unknown.
- * @workspace: The workspace to emplace the context into. It must outlive
- * the returned context.
- * @workspaceSize: The size of workspace.
- * Use ZSTD_CStreamWorkspaceBound(params.cParams) to determine
- * how large the workspace must be.
- *
- * Return: The zstd streaming compression context.
+ * zstd_init_cstream().
*/
-ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params,
- unsigned long long pledgedSrcSize, void *workspace,
- size_t workspaceSize);
+size_t zstd_cstream_workspace_bound(const zstd_compression_parameters *cparams);
/**
- * ZSTD_initCStream_usingCDict() - initialize a streaming compression context
- * @cdict: The digested dictionary to use for compression.
- * @pledgedSrcSize: Optionally the source size, or zero if unknown.
- * @workspace: The workspace to emplace the context into. It must outlive
- * the returned context.
- * @workspaceSize: The size of workspace. Call ZSTD_CStreamWorkspaceBound()
- * with the cParams used to initialize the cdict to determine
- * how large the workspace must be.
+ * zstd_init_cstream() - initialize a zstd streaming compression context
+ * @parameters: The zstd parameters to use for compression.
+ * @pledged_src_size: If params.fParams.contentSizeFlag == 1 then the caller
+ * must pass the source size (zero means empty source).
+ * Otherwise, the caller may optionally pass the source
+ * size, or zero if unknown.
+ * @workspace: The workspace to emplace the context into. It must outlive
+ * the returned context.
+ * @workspace_size: The size of workspace.
+ * Use zstd_cstream_workspace_bound(&parameters->cParams)
+ * to determine how large the workspace must be.
*
- * Return: The zstd streaming compression context.
+ * Return: The zstd streaming compression context or NULL on error.
*/
-ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict,
- unsigned long long pledgedSrcSize, void *workspace,
- size_t workspaceSize);
+zstd_cstream *zstd_init_cstream(const zstd_parameters *parameters,
+ unsigned long long pledged_src_size, void *workspace, size_t workspace_size);
-/*===== Streaming compression functions =====*/
/**
- * ZSTD_resetCStream() - reset the context using parameters from creation
- * @zcs: The zstd streaming compression context to reset.
- * @pledgedSrcSize: Optionally the source size, or zero if unknown.
+ * zstd_reset_cstream() - reset the context using parameters from creation
+ * @cstream: The zstd streaming compression context to reset.
+ * @pledged_src_size: Optionally the source size, or zero if unknown.
*
* Resets the context using the parameters from creation. Skips dictionary
- * loading, since it can be reused. If `pledgedSrcSize` is non-zero the frame
+ * loading, since it can be reused. If `pledged_src_size` is non-zero the frame
* content size is always written into the frame header.
*
- * Return: Zero or an error, which can be checked using ZSTD_isError().
+ * Return: Zero or an error, which can be checked using
+ * zstd_is_error().
*/
-size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize);
+size_t zstd_reset_cstream(zstd_cstream *cstream,
+ unsigned long long pledged_src_size);
+
/**
- * ZSTD_compressStream() - streaming compress some of input into output
- * @zcs: The zstd streaming compression context.
- * @output: Destination buffer. `output->pos` is updated to indicate how much
- * compressed data was written.
- * @input: Source buffer. `input->pos` is updated to indicate how much data was
- * read. Note that it may not consume the entire input, in which case
- * `input->pos < input->size`, and it's up to the caller to present
- * remaining data again.
+ * zstd_compress_stream() - streaming compress some of input into output
+ * @cstream: The zstd streaming compression context.
+ * @output: Destination buffer. `output->pos` is updated to indicate how much
+ * compressed data was written.
+ * @input: Source buffer. `input->pos` is updated to indicate how much data
+ * was read. Note that it may not consume the entire input, in which
+ * case `input->pos < input->size`, and it's up to the caller to
+ * present remaining data again.
*
* The `input` and `output` buffers may be any size. Guaranteed to make some
* forward progress if `input` and `output` are not empty.
*
- * Return: A hint for the number of bytes to use as the input for the next
- * function call or an error, which can be checked using
- * ZSTD_isError().
+ * Return: A hint for the number of bytes to use as the input for the next
+ * function call or an error, which can be checked using
+ * zstd_is_error().
*/
-size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output,
- ZSTD_inBuffer *input);
+size_t zstd_compress_stream(zstd_cstream *cstream, zstd_out_buffer *output,
+ zstd_in_buffer *input);
+
/**
- * ZSTD_flushStream() - flush internal buffers into output
- * @zcs: The zstd streaming compression context.
- * @output: Destination buffer. `output->pos` is updated to indicate how much
- * compressed data was written.
+ * zstd_flush_stream() - flush internal buffers into output
+ * @cstream: The zstd streaming compression context.
+ * @output: Destination buffer. `output->pos` is updated to indicate how much
+ * compressed data was written.
*
- * ZSTD_flushStream() must be called until it returns 0, meaning all the data
- * has been flushed. Since ZSTD_flushStream() causes a block to be ended,
+ * zstd_flush_stream() must be called until it returns 0, meaning all the data
+ * has been flushed. Since zstd_flush_stream() causes a block to be ended,
* calling it too often will degrade the compression ratio.
*
- * Return: The number of bytes still present within internal buffers or an
- * error, which can be checked using ZSTD_isError().
+ * Return: The number of bytes still present within internal buffers or an
+ * error, which can be checked using zstd_is_error().
*/
-size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output);
-/**
- * ZSTD_endStream() - flush internal buffers into output and end the frame
- * @zcs: The zstd streaming compression context.
- * @output: Destination buffer. `output->pos` is updated to indicate how much
- * compressed data was written.
- *
- * ZSTD_endStream() must be called until it returns 0, meaning all the data has
- * been flushed and the frame epilogue has been written.
- *
- * Return: The number of bytes still present within internal buffers or an
- * error, which can be checked using ZSTD_isError().
- */
-size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output);
+size_t zstd_flush_stream(zstd_cstream *cstream, zstd_out_buffer *output);
/**
- * ZSTD_CStreamInSize() - recommended size for the input buffer
- *
- * Return: The recommended size for the input buffer.
- */
-size_t ZSTD_CStreamInSize(void);
-/**
- * ZSTD_CStreamOutSize() - recommended size for the output buffer
+ * zstd_end_stream() - flush internal buffers into output and end the frame
+ * @cstream: The zstd streaming compression context.
+ * @output: Destination buffer. `output->pos` is updated to indicate how much
+ * compressed data was written.
*
- * When the output buffer is at least this large, it is guaranteed to be large
- * enough to flush at least one complete compressed block.
+ * zstd_end_stream() must be called until it returns 0, meaning all the data has
+ * been flushed and the frame epilogue has been written.
*
- * Return: The recommended size for the output buffer.
+ * Return: The number of bytes still present within internal buffers or an
+ * error, which can be checked using zstd_is_error().
*/
-size_t ZSTD_CStreamOutSize(void);
+size_t zstd_end_stream(zstd_cstream *cstream, zstd_out_buffer *output);
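[Editor's note: a hedged sketch of the streaming compression loop implied by
the docs above; buffer refill/drain between iterations is left to the caller,
and the function name is hypothetical.]

	#include <linux/errno.h>
	#include <linux/zstd.h>

	/* Assumes cstream was set up with zstd_init_cstream(). */
	static int example_stream_compress(zstd_cstream *cstream,
					   zstd_out_buffer *out, zstd_in_buffer *in)
	{
		size_t ret;

		while (in->pos < in->size) {
			ret = zstd_compress_stream(cstream, out, in);
			if (zstd_is_error(ret))
				return -EINVAL;
			/* Caller must drain *out as it fills (omitted). */
		}
		do {
			ret = zstd_end_stream(cstream, out);	/* frame epilogue */
			if (zstd_is_error(ret))
				return -EINVAL;
		} while (ret != 0);	/* 0 means fully flushed */
		return 0;
	}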
+/* ====== Streaming Decompression ====== */
-
-/*-*****************************************************************************
- * Streaming decompression - HowTo
- *
- * A ZSTD_DStream object is required to track streaming operations.
- * Use ZSTD_initDStream() to initialize a ZSTD_DStream object.
- * ZSTD_DStream objects can be re-used multiple times.
- *
- * Use ZSTD_decompressStream() repetitively to consume your input.
- * The function will update both `pos` fields.
- * If `input->pos < input->size`, some input has not been consumed.
- * It's up to the caller to present again remaining data.
- * If `output->pos < output->size`, decoder has flushed everything it could.
- * Returns 0 iff a frame is completely decoded and fully flushed.
- * Otherwise it returns a suggested next input size that will never load more
- * than the current frame.
- ******************************************************************************/
+typedef ZSTD_DStream zstd_dstream;
/**
- * ZSTD_DStreamWorkspaceBound() - memory needed to initialize a ZSTD_DStream
- * @maxWindowSize: The maximum window size allowed for compressed frames.
+ * zstd_dstream_workspace_bound() - memory needed to initialize a zstd_dstream
+ * @max_window_size: The maximum window size allowed for compressed frames.
*
- * Return: A lower bound on the size of the workspace that is passed to
- * ZSTD_initDStream() and ZSTD_initDStream_usingDDict().
+ * Return: A lower bound on the size of the workspace that is passed
+ * to zstd_init_dstream().
*/
-size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize);
+size_t zstd_dstream_workspace_bound(size_t max_window_size);
/**
- * struct ZSTD_DStream - the zstd streaming decompression context
- */
-typedef struct ZSTD_DStream_s ZSTD_DStream;
-/*===== ZSTD_DStream management functions =====*/
-/**
- * ZSTD_initDStream() - initialize a zstd streaming decompression context
- * @maxWindowSize: The maximum window size allowed for compressed frames.
- * @workspace: The workspace to emplace the context into. It must outlive
- * the returned context.
- * @workspaceSize: The size of workspace.
- * Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine
- * how large the workspace must be.
- *
- * Return: The zstd streaming decompression context.
- */
-ZSTD_DStream *ZSTD_initDStream(size_t maxWindowSize, void *workspace,
- size_t workspaceSize);
-/**
- * ZSTD_initDStream_usingDDict() - initialize streaming decompression context
- * @maxWindowSize: The maximum window size allowed for compressed frames.
- * @ddict: The digested dictionary to use for decompression.
- * @workspace: The workspace to emplace the context into. It must outlive
- * the returned context.
- * @workspaceSize: The size of workspace.
- * Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine
- * how large the workspace must be.
+ * zstd_init_dstream() - initialize a zstd streaming decompression context
+ * @max_window_size: The maximum window size allowed for compressed frames.
+ * @workspace: The workspace to emplace the context into. It must outlive
+ * the returned context.
+ * @workspace_size: The size of workspace.
+ * Use zstd_dstream_workspace_bound(max_window_size) to
+ * determine how large the workspace must be.
*
- * Return: The zstd streaming decompression context.
+ * Return: The zstd streaming decompression context or NULL on error.
*/
-ZSTD_DStream *ZSTD_initDStream_usingDDict(size_t maxWindowSize,
- const ZSTD_DDict *ddict, void *workspace, size_t workspaceSize);
+zstd_dstream *zstd_init_dstream(size_t max_window_size, void *workspace,
+ size_t workspace_size);
-/*===== Streaming decompression functions =====*/
/**
- * ZSTD_resetDStream() - reset the context using parameters from creation
- * @zds: The zstd streaming decompression context to reset.
+ * zstd_reset_dstream() - reset the context using parameters from creation
+ * @dstream: The zstd streaming decompression context to reset.
*
* Resets the context using the parameters from creation. Skips dictionary
* loading, since it can be reused.
*
- * Return: Zero or an error, which can be checked using ZSTD_isError().
+ * Return: Zero or an error, which can be checked using zstd_is_error().
*/
-size_t ZSTD_resetDStream(ZSTD_DStream *zds);
+size_t zstd_reset_dstream(zstd_dstream *dstream);
+
/**
- * ZSTD_decompressStream() - streaming decompress some of input into output
- * @zds: The zstd streaming decompression context.
- * @output: Destination buffer. `output.pos` is updated to indicate how much
- * decompressed data was written.
- * @input: Source buffer. `input.pos` is updated to indicate how much data was
- * read. Note that it may not consume the entire input, in which case
- * `input.pos < input.size`, and it's up to the caller to present
- * remaining data again.
+ * zstd_decompress_stream() - streaming decompress some of input into output
+ * @dstream: The zstd streaming decompression context.
+ * @output: Destination buffer. `output.pos` is updated to indicate how much
+ * decompressed data was written.
+ * @input: Source buffer. `input.pos` is updated to indicate how much data was
+ * read. Note that it may not consume the entire input, in which case
+ * `input.pos < input.size`, and it's up to the caller to present
+ * remaining data again.
*
* The `input` and `output` buffers may be any size. Guaranteed to make some
* forward progress if `input` and `output` are not empty.
- * ZSTD_decompressStream() will not consume the last byte of the frame until
+ * zstd_decompress_stream() will not consume the last byte of the frame until
* the entire frame is flushed.
*
- * Return: Returns 0 iff a frame is completely decoded and fully flushed.
- * Otherwise returns a hint for the number of bytes to use as the input
- * for the next function call or an error, which can be checked using
- * ZSTD_isError(). The size hint will never load more than the frame.
+ * Return: Returns 0 iff a frame is completely decoded and fully flushed.
+ * Otherwise returns a hint for the number of bytes to use as the
+ * input for the next function call or an error, which can be checked
+ * using zstd_is_error(). The size hint will never load more than the
+ * frame.
*/
-size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output,
- ZSTD_inBuffer *input);
+size_t zstd_decompress_stream(zstd_dstream *dstream, zstd_out_buffer *output,
+ zstd_in_buffer *input);
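[Editor's note: the matching streaming decompression sketch, with the same
caveat that the caller refills `in` and drains `out` between iterations
(omitted here).]

	#include <linux/errno.h>
	#include <linux/zstd.h>

	/* Assumes dstream was set up with zstd_init_dstream(). */
	static int example_stream_decompress(zstd_dstream *dstream,
					     zstd_out_buffer *out, zstd_in_buffer *in)
	{
		size_t ret;

		do {
			ret = zstd_decompress_stream(dstream, out, in);
			if (zstd_is_error(ret))
				return -EINVAL;
		} while (ret != 0);	/* 0: frame decoded and fully flushed */
		return 0;
	}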
-/**
- * ZSTD_DStreamInSize() - recommended size for the input buffer
- *
- * Return: The recommended size for the input buffer.
- */
-size_t ZSTD_DStreamInSize(void);
-/**
- * ZSTD_DStreamOutSize() - recommended size for the output buffer
- *
- * When the output buffer is at least this large, it is guaranteed to be large
- * enough to flush at least one complete decompressed block.
- *
- * Return: The recommended size for the output buffer.
- */
-size_t ZSTD_DStreamOutSize(void);
-
-
-/* --- Constants ---*/
-#define ZSTD_MAGICNUMBER 0xFD2FB528 /* >= v0.8.0 */
-#define ZSTD_MAGIC_SKIPPABLE_START 0x184D2A50U
-
-#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
-#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
-
-#define ZSTD_WINDOWLOG_MAX_32 27
-#define ZSTD_WINDOWLOG_MAX_64 27
-#define ZSTD_WINDOWLOG_MAX \
- ((unsigned int)(sizeof(size_t) == 4 \
- ? ZSTD_WINDOWLOG_MAX_32 \
- : ZSTD_WINDOWLOG_MAX_64))
-#define ZSTD_WINDOWLOG_MIN 10
-#define ZSTD_HASHLOG_MAX ZSTD_WINDOWLOG_MAX
-#define ZSTD_HASHLOG_MIN 6
-#define ZSTD_CHAINLOG_MAX (ZSTD_WINDOWLOG_MAX+1)
-#define ZSTD_CHAINLOG_MIN ZSTD_HASHLOG_MIN
-#define ZSTD_HASHLOG3_MAX 17
-#define ZSTD_SEARCHLOG_MAX (ZSTD_WINDOWLOG_MAX-1)
-#define ZSTD_SEARCHLOG_MIN 1
-/* only for ZSTD_fast, other strategies are limited to 6 */
-#define ZSTD_SEARCHLENGTH_MAX 7
-/* only for ZSTD_btopt, other strategies are limited to 4 */
-#define ZSTD_SEARCHLENGTH_MIN 3
-#define ZSTD_TARGETLENGTH_MIN 4
-#define ZSTD_TARGETLENGTH_MAX 999
-
-/* for static allocation */
-#define ZSTD_FRAMEHEADERSIZE_MAX 18
-#define ZSTD_FRAMEHEADERSIZE_MIN 6
-#define ZSTD_frameHeaderSize_prefix 5
-#define ZSTD_frameHeaderSize_min ZSTD_FRAMEHEADERSIZE_MIN
-#define ZSTD_frameHeaderSize_max ZSTD_FRAMEHEADERSIZE_MAX
-/* magic number + skippable frame length */
-#define ZSTD_skippableHeaderSize 8
-
-
-/*-*************************************
- * Compressed size functions
- **************************************/
-
-/**
- * ZSTD_findFrameCompressedSize() - returns the size of a compressed frame
- * @src: Source buffer. It should point to the start of a zstd encoded frame
- * or a skippable frame.
- * @srcSize: The size of the source buffer. It must be at least as large as the
- * size of the frame.
- *
- * Return: The compressed size of the frame pointed to by `src` or an error,
- * which can be check with ZSTD_isError().
- * Suitable to pass to ZSTD_decompress() or similar functions.
- */
-size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize);
-
-/*-*************************************
- * Decompressed size functions
- **************************************/
-/**
- * ZSTD_getFrameContentSize() - returns the content size in a zstd frame header
- * @src: It should point to the start of a zstd encoded frame.
- * @srcSize: The size of the source buffer. It must be at least as large as the
- * frame header. `ZSTD_frameHeaderSize_max` is always large enough.
- *
- * Return: The frame content size stored in the frame header if known.
- * `ZSTD_CONTENTSIZE_UNKNOWN` if the content size isn't stored in the
- * frame header. `ZSTD_CONTENTSIZE_ERROR` on invalid input.
- */
-unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
+/* ====== Frame Inspection Functions ====== */
/**
- * ZSTD_findDecompressedSize() - returns decompressed size of a series of frames
- * @src: It should point to the start of a series of zstd encoded and/or
- * skippable frames.
- * @srcSize: The exact size of the series of frames.
+ * zstd_find_frame_compressed_size() - returns the size of a compressed frame
+ * @src: Source buffer. It should point to the start of a zstd encoded
+ * frame or a skippable frame.
+ * @src_size: The size of the source buffer. It must be at least as large as the
+ * size of the frame.
*
- * If any zstd encoded frame in the series doesn't have the frame content size
- * set, `ZSTD_CONTENTSIZE_UNKNOWN` is returned. But frame content size is always
- * set when using ZSTD_compress(). The decompressed size can be very large.
- * If the source is untrusted, the decompressed size could be wrong or
- * intentionally modified. Always ensure the result fits within the
- * application's authorized limits. ZSTD_findDecompressedSize() handles multiple
- * frames, and so it must traverse the input to read each frame header. This is
- * efficient as most of the data is skipped, however it does mean that all frame
- * data must be present and valid.
- *
- * Return: Decompressed size of all the data contained in the frames if known.
- * `ZSTD_CONTENTSIZE_UNKNOWN` if the decompressed size is unknown.
- * `ZSTD_CONTENTSIZE_ERROR` if an error occurred.
- */
-unsigned long long ZSTD_findDecompressedSize(const void *src, size_t srcSize);
-
-/*-*************************************
- * Advanced compression functions
- **************************************/
-/**
- * ZSTD_checkCParams() - ensure parameter values remain within authorized range
- * @cParams: The zstd compression parameters.
- *
- * Return: Zero or an error, which can be checked using ZSTD_isError().
+ * Return: The compressed size of the frame pointed to by `src` or an error,
+ * which can be checked with zstd_is_error().
+ * Suitable to pass to ZSTD_decompress() or similar functions.
*/
-size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams);
+size_t zstd_find_frame_compressed_size(const void *src, size_t src_size);
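[Editor's note: a short sketch walking a buffer of concatenated frames with the
function above; purely illustrative, including the errno mapping.]

	#include <linux/errno.h>
	#include <linux/zstd.h>

	static int example_count_frames(const void *src, size_t src_size)
	{
		int frames = 0;

		while (src_size > 0) {
			size_t n = zstd_find_frame_compressed_size(src, src_size);

			if (zstd_is_error(n))
				return -EINVAL;
			src += n;		/* void arithmetic: GNU C, as in-kernel */
			src_size -= n;
			frames++;
		}
		return frames;
	}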
/**
- * ZSTD_adjustCParams() - optimize parameters for a given srcSize and dictSize
- * @srcSize: Optionally the estimated source size, or zero if unknown.
- * @dictSize: Optionally the estimated dictionary size, or zero if unknown.
- *
- * Return: The optimized parameters.
- */
-ZSTD_compressionParameters ZSTD_adjustCParams(
- ZSTD_compressionParameters cParams, unsigned long long srcSize,
- size_t dictSize);
-
-/*--- Advanced decompression functions ---*/
-
-/**
- * ZSTD_isFrame() - returns true iff the buffer starts with a valid frame
- * @buffer: The source buffer to check.
- * @size: The size of the source buffer, must be at least 4 bytes.
- *
- * Return: True iff the buffer starts with a zstd or skippable frame identifier.
- */
-unsigned int ZSTD_isFrame(const void *buffer, size_t size);
-
-/**
- * ZSTD_getDictID_fromDict() - returns the dictionary id stored in a dictionary
- * @dict: The dictionary buffer.
- * @dictSize: The size of the dictionary buffer.
- *
- * Return: The dictionary id stored within the dictionary or 0 if the
- * dictionary is not a zstd dictionary. If it returns 0 the
- * dictionary can still be loaded as a content-only dictionary.
- */
-unsigned int ZSTD_getDictID_fromDict(const void *dict, size_t dictSize);
-
-/**
- * ZSTD_getDictID_fromDDict() - returns the dictionary id stored in a ZSTD_DDict
- * @ddict: The ddict to find the id of.
- *
- * Return: The dictionary id stored within `ddict` or 0 if the dictionary is not
- * a zstd dictionary. If it returns 0 `ddict` will be loaded as a
- * content-only dictionary.
- */
-unsigned int ZSTD_getDictID_fromDDict(const ZSTD_DDict *ddict);
-
-/**
- * ZSTD_getDictID_fromFrame() - returns the dictionary id stored in a zstd frame
- * @src: Source buffer. It must be a zstd encoded frame.
- * @srcSize: The size of the source buffer. It must be at least as large as the
- * frame header. `ZSTD_frameHeaderSize_max` is always large enough.
- *
- * Return: The dictionary id required to decompress the frame stored within
- * `src` or 0 if the dictionary id could not be decoded. It can return
- * 0 if the frame does not require a dictionary, the dictionary id
- * wasn't stored in the frame, `src` is not a zstd frame, or `srcSize`
- * is too small.
- */
-unsigned int ZSTD_getDictID_fromFrame(const void *src, size_t srcSize);
-
-/**
- * struct ZSTD_frameParams - zstd frame parameters stored in the frame header
- * @frameContentSize: The frame content size, or 0 if not present.
+ * struct zstd_frame_params - zstd frame parameters stored in the frame header
+ * @frameContentSize: The frame content size, or ZSTD_CONTENTSIZE_UNKNOWN if not
+ * present.
* @windowSize: The window size, or 0 if the frame is a skippable frame.
+ * @blockSizeMax: The maximum block size.
+ * @frameType: The frame type (zstd or skippable).
+ * @headerSize: The size of the frame header.
* @dictID: The dictionary id, or 0 if not present.
* @checksumFlag: Whether a checksum was used.
+ *
+ * See zstd_lib.h.
*/
-typedef struct {
- unsigned long long frameContentSize;
- unsigned int windowSize;
- unsigned int dictID;
- unsigned int checksumFlag;
-} ZSTD_frameParams;
+typedef ZSTD_frameHeader zstd_frame_header;
/**
- * ZSTD_getFrameParams() - extracts parameters from a zstd or skippable frame
- * @fparamsPtr: On success the frame parameters are written here.
- * @src: The source buffer. It must point to a zstd or skippable frame.
- * @srcSize: The size of the source buffer. `ZSTD_frameHeaderSize_max` is
- * always large enough to succeed.
+ * zstd_get_frame_header() - extracts parameters from a zstd or skippable frame
+ * @params: On success the frame parameters are written here.
+ * @src: The source buffer. It must point to a zstd or skippable frame.
+ * @src_size: The size of the source buffer.
*
- * Return: 0 on success. If more data is required it returns how many bytes
- * must be provided to make forward progress. Otherwise it returns
- * an error, which can be checked using ZSTD_isError().
+ * Return: 0 on success. If more data is required it returns how many bytes
+ * must be provided to make forward progress. Otherwise it returns
+ * an error, which can be checked using zstd_is_error().
*/
-size_t ZSTD_getFrameParams(ZSTD_frameParams *fparamsPtr, const void *src,
- size_t srcSize);
-
-/*-*****************************************************************************
- * Buffer-less and synchronous inner streaming functions
- *
- * This is an advanced API, giving full control over buffer management, for
- * users which need direct control over memory.
- * But it's also a complex one, with many restrictions (documented below).
- * Prefer using normal streaming API for an easier experience
- ******************************************************************************/
-
-/*-*****************************************************************************
- * Buffer-less streaming compression (synchronous mode)
- *
- * A ZSTD_CCtx object is required to track streaming operations.
- * Use ZSTD_initCCtx() to initialize a context.
- * ZSTD_CCtx object can be re-used multiple times within successive compression
- * operations.
- *
- * Start by initializing a context.
- * Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary
- * compression,
- * or ZSTD_compressBegin_advanced(), for finer parameter control.
- * It's also possible to duplicate a reference context which has already been
- * initialized, using ZSTD_copyCCtx()
- *
- * Then, consume your input using ZSTD_compressContinue().
- * There are some important considerations to keep in mind when using this
- * advanced function :
- * - ZSTD_compressContinue() has no internal buffer. It uses externally provided
- * buffer only.
- * - Interface is synchronous : input is consumed entirely and produce 1+
- * (or more) compressed blocks.
- * - Caller must ensure there is enough space in `dst` to store compressed data
- * under worst case scenario. Worst case evaluation is provided by
- * ZSTD_compressBound().
- * ZSTD_compressContinue() doesn't guarantee recover after a failed
- * compression.
- * - ZSTD_compressContinue() presumes prior input ***is still accessible and
- * unmodified*** (up to maximum distance size, see WindowLog).
- * It remembers all previous contiguous blocks, plus one separated memory
- * segment (which can itself consists of multiple contiguous blocks)
- * - ZSTD_compressContinue() detects that prior input has been overwritten when
- * `src` buffer overlaps. In which case, it will "discard" the relevant memory
- * section from its history.
- *
- * Finish a frame with ZSTD_compressEnd(), which will write the last block(s)
- * and optional checksum. It's possible to use srcSize==0, in which case, it
- * will write a final empty block to end the frame. Without last block mark,
- * frames will be considered unfinished (corrupted) by decoders.
- *
- * `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress some new
- * frame.
- ******************************************************************************/
-
-/*===== Buffer-less streaming compression functions =====*/
-size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel);
-size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict,
- size_t dictSize, int compressionLevel);
-size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict,
- size_t dictSize, ZSTD_parameters params,
- unsigned long long pledgedSrcSize);
-size_t ZSTD_copyCCtx(ZSTD_CCtx *cctx, const ZSTD_CCtx *preparedCCtx,
- unsigned long long pledgedSrcSize);
-size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict,
- unsigned long long pledgedSrcSize);
-size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize);
-size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize);
-
-
-
-/*-*****************************************************************************
- * Buffer-less streaming decompression (synchronous mode)
- *
- * A ZSTD_DCtx object is required to track streaming operations.
- * Use ZSTD_initDCtx() to initialize a context.
- * A ZSTD_DCtx object can be re-used multiple times.
- *
- * First typical operation is to retrieve frame parameters, using
- * ZSTD_getFrameParams(). It fills a ZSTD_frameParams structure which provide
- * important information to correctly decode the frame, such as the minimum
- * rolling buffer size to allocate to decompress data (`windowSize`), and the
- * dictionary ID used.
- * Note: content size is optional, it may not be present. 0 means unknown.
- * Note that these values could be wrong, either because of data malformation,
- * or because an attacker is spoofing deliberate false information. As a
- * consequence, check that values remain within valid application range,
- * especially `windowSize`, before allocation. Each application can set its own
- * limit, depending on local restrictions. For extended interoperability, it is
- * recommended to support at least 8 MB.
- * Frame parameters are extracted from the beginning of the compressed frame.
- * Data fragment must be large enough to ensure successful decoding, typically
- * `ZSTD_frameHeaderSize_max` bytes.
- * Result: 0: successful decoding, the `ZSTD_frameParams` structure is filled.
- * >0: `srcSize` is too small, provide at least this many bytes.
- * errorCode, which can be tested using ZSTD_isError().
- *
- * Start decompression, with ZSTD_decompressBegin() or
- * ZSTD_decompressBegin_usingDict(). Alternatively, you can copy a prepared
- * context, using ZSTD_copyDCtx().
- *
- * Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue()
- * alternatively.
- * ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize'
- * to ZSTD_decompressContinue().
- * ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will
- * fail.
- *
- * The result of ZSTD_decompressContinue() is the number of bytes regenerated
- * within 'dst' (necessarily <= dstCapacity). It can be zero, which is not an
- * error; it just means ZSTD_decompressContinue() has decoded some metadata
- * item. It can also be an error code, which can be tested with ZSTD_isError().
- *
- * ZSTD_decompressContinue() needs previous data blocks during decompression, up
- * to `windowSize`. They should preferably be located contiguously, prior to
- * current block. Alternatively, a round buffer of sufficient size is also
- * possible. Sufficient size is determined by frame parameters.
- * ZSTD_decompressContinue() is very sensitive to contiguity, if 2 blocks don't
- * follow each other, make sure that either the compressor breaks contiguity at
- * the same place, or that previous contiguous segment is large enough to
- * properly handle maximum back-reference.
- *
- * A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
- * Context can then be reset to start a new decompression.
- *
- * Note: it's possible to know if next input to present is a header or a block,
- * using ZSTD_nextInputType(). This information is not required to properly
- * decode a frame.
- *
- * == Special case: skippable frames ==
- *
- * Skippable frames allow integration of user-defined data into a flow of
- * concatenated frames. Skippable frames will be ignored (skipped) by a
- * decompressor. The format of skippable frames is as follows:
- * a) Skippable frame ID - 4 Bytes, Little endian format, any value from
- * 0x184D2A50 to 0x184D2A5F
- * b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
- * c) Frame Content - any content (User Data) of length equal to Frame Size
- * For skippable frames ZSTD_decompressContinue() always returns 0.
- * For skippable frames ZSTD_getFrameParams() returns fparamsPtr->windowLog==0
- * what means that a frame is skippable.
- * Note: If fparamsPtr->frameContentSize==0, it is ambiguous: the frame might
- * actually be a zstd encoded frame with no content. For purposes of
- * decompression, it is valid in both cases to skip the frame using
- * ZSTD_findFrameCompressedSize() to find its size in bytes.
- * It also returns frame size as fparamsPtr->frameContentSize.
- ******************************************************************************/
-
-/*===== Buffer-less streaming decompression functions =====*/
-size_t ZSTD_decompressBegin(ZSTD_DCtx *dctx);
-size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx *dctx, const void *dict,
- size_t dictSize);
-void ZSTD_copyDCtx(ZSTD_DCtx *dctx, const ZSTD_DCtx *preparedDCtx);
-size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx *dctx);
-size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize);
-typedef enum {
- ZSTDnit_frameHeader,
- ZSTDnit_blockHeader,
- ZSTDnit_block,
- ZSTDnit_lastBlock,
- ZSTDnit_checksum,
- ZSTDnit_skippableFrame
-} ZSTD_nextInputType_e;
-ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx *dctx);
-
-/*-*****************************************************************************
- * Block functions
- *
- * Block functions produce and decode raw zstd blocks, without frame metadata.
- * Frame metadata cost is typically ~18 bytes, which can be non-negligible for
- * very small blocks (< 100 bytes). User will have to take in charge required
- * information to regenerate data, such as compressed and content sizes.
- *
- * A few rules to respect:
- * - Compressing and decompressing require a context structure
- * + Use ZSTD_initCCtx() and ZSTD_initDCtx()
- * - It is necessary to init context before starting
- * + compression : ZSTD_compressBegin()
- * + decompression : ZSTD_decompressBegin()
- * + variants _usingDict() are also allowed
- * + copyCCtx() and copyDCtx() work too
- * - Block size is limited, it must be <= ZSTD_getBlockSizeMax()
- * + If you need to compress more, cut data into multiple blocks
- * + Consider using the regular ZSTD_compress() instead, as frame metadata
- * costs become negligible when source size is large.
- * - When a block is considered not compressible enough, ZSTD_compressBlock()
- * result will be zero. In which case, nothing is produced into `dst`.
- * + User must test for such outcome and deal directly with uncompressed data
- * + ZSTD_decompressBlock() doesn't accept uncompressed data as input!!!
- * + In case of multiple successive blocks, decoder must be informed of
- * uncompressed block existence to follow proper history. Use
- * ZSTD_insertBlock() in such a case.
- ******************************************************************************/
-
-/* Define for static allocation */
-#define ZSTD_BLOCKSIZE_ABSOLUTEMAX (128 * 1024)
-/*===== Raw zstd block functions =====*/
-size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx);
-size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize);
-size_t ZSTD_decompressBlock(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize);
-size_t ZSTD_insertBlock(ZSTD_DCtx *dctx, const void *blockStart,
- size_t blockSize);
+size_t zstd_get_frame_header(zstd_frame_header *params, const void *src,
+ size_t src_size);
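+
+/*
+ * A hedged sketch of validating a frame header before allocating
+ * decompression buffers. The function name, the buf/buf_size parameters and
+ * the 8 MB window limit are assumptions made for this example.
+ */
+static inline int zstd_example_check_frame(const void *buf, size_t buf_size)
+{
+	zstd_frame_header params;
+	size_t ret = zstd_get_frame_header(&params, buf, buf_size);
+
+	if (ret != 0)
+		return -1; /* error, or more header bytes are required */
+	/* Header fields are untrusted input: cap windowSize before use. */
+	if (params.windowSize > 8 * 1024 * 1024)
+		return -1;
+	return 0;
+}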
-#endif /* ZSTD_H */
+#endif /* LINUX_ZSTD_H */
diff --git a/include/linux/zstd_errors.h b/include/linux/zstd_errors.h
new file mode 100644
index 000000000000..58b6dd45a969
--- /dev/null
+++ b/include/linux/zstd_errors.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_ERRORS_H_398273423
+#define ZSTD_ERRORS_H_398273423
+
+
+/*===== dependency =====*/
+#include <linux/types.h> /* size_t */
+
+
+/* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */
+#define ZSTDERRORLIB_VISIBILITY
+#define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY
+
+/*-*********************************************
+ * Error codes list
+ *-*********************************************
+ * Error codes _values_ are pinned down since v1.3.1 only.
+ * Therefore, don't rely on values if you may link to any version < v1.3.1.
+ *
+ * Only values < 100 are considered stable.
+ *
+ * note 1 : this API shall be used with static linking only.
+ * dynamic linking is not yet officially supported.
+ * note 2 : Prefer relying on the enum than on its value whenever possible
+ * This is the only supported way to use the error list < v1.3.1
+ * note 3 : ZSTD_isError() is always correct, whatever the library version.
+ **********************************************/
+typedef enum {
+ ZSTD_error_no_error = 0,
+ ZSTD_error_GENERIC = 1,
+ ZSTD_error_prefix_unknown = 10,
+ ZSTD_error_version_unsupported = 12,
+ ZSTD_error_frameParameter_unsupported = 14,
+ ZSTD_error_frameParameter_windowTooLarge = 16,
+ ZSTD_error_corruption_detected = 20,
+ ZSTD_error_checksum_wrong = 22,
+ ZSTD_error_dictionary_corrupted = 30,
+ ZSTD_error_dictionary_wrong = 32,
+ ZSTD_error_dictionaryCreation_failed = 34,
+ ZSTD_error_parameter_unsupported = 40,
+ ZSTD_error_parameter_outOfBound = 42,
+ ZSTD_error_tableLog_tooLarge = 44,
+ ZSTD_error_maxSymbolValue_tooLarge = 46,
+ ZSTD_error_maxSymbolValue_tooSmall = 48,
+ ZSTD_error_stage_wrong = 60,
+ ZSTD_error_init_missing = 62,
+ ZSTD_error_memory_allocation = 64,
+  ZSTD_error_workSpace_tooSmall = 66,
+ ZSTD_error_dstSize_tooSmall = 70,
+ ZSTD_error_srcSize_wrong = 72,
+ ZSTD_error_dstBuffer_null = 74,
+ /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */
+ ZSTD_error_frameIndex_tooLarge = 100,
+ ZSTD_error_seekableIO = 102,
+ ZSTD_error_dstBuffer_wrong = 104,
+ ZSTD_error_srcBuffer_wrong = 105,
+ ZSTD_error_maxCode = 120 /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */
+} ZSTD_ErrorCode;
+
+/*! ZSTD_getErrorCode() :
+ convert a `size_t` function result into a `ZSTD_ErrorCode` enum type,
+ which can be used to compare with enum list published above */
+ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
+ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code); /*< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */
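+
+/*
+ * A minimal sketch of comparing against the enum rather than raw values, as
+ * note 2 above recommends. The function name and the `ret` parameter are
+ * assumptions made for this example.
+ */
+static inline int ZSTD_example_isDstTooSmall(size_t ret)
+{
+    /* ZSTD_getErrorCode() returns ZSTD_error_no_error for successful results. */
+    return ZSTD_getErrorCode(ret) == ZSTD_error_dstSize_tooSmall;
+}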
+
+
+
+#endif /* ZSTD_ERRORS_H_398273423 */
diff --git a/include/linux/zstd_lib.h b/include/linux/zstd_lib.h
new file mode 100644
index 000000000000..b8c7dbf98390
--- /dev/null
+++ b/include/linux/zstd_lib.h
@@ -0,0 +1,2432 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_H_235446
+#define ZSTD_H_235446
+
+/* ====== Dependency ======*/
+#include <linux/limits.h> /* INT_MAX */
+#include <linux/types.h> /* size_t */
+
+
+/* ===== ZSTDLIB_API : control library symbols visibility ===== */
+#define ZSTDLIB_VISIBILITY
+#define ZSTDLIB_API ZSTDLIB_VISIBILITY
+
+
+/* *****************************************************************************
+ Introduction
+
+ zstd, short for Zstandard, is a fast lossless compression algorithm, targeting
+ real-time compression scenarios at zlib-level and better compression ratios.
+ The zstd compression library provides in-memory compression and decompression
+ functions.
+
+ The library supports regular compression levels from 1 up to ZSTD_maxCLevel(),
+ which is currently 22. Levels >= 20, labeled `--ultra`, should be used with
+ caution, as they require more memory. The library also offers negative
+ compression levels, which extend the range of speed vs. ratio preferences.
+ The lower the level, the faster the speed (at the cost of compression).
+
+ Compression can be done in:
+ - a single step (described as Simple API)
+ - a single step, reusing a context (described as Explicit context)
+ - unbounded multiple steps (described as Streaming compression)
+
+  The compression ratio achievable on small data can be greatly improved using
+ a dictionary. Dictionary compression can be performed in:
+ - a single step (described as Simple dictionary API)
+ - a single step, reusing a dictionary (described as Bulk-processing
+ dictionary API)
+
+ Advanced experimental functions can be accessed using
+ `#define ZSTD_STATIC_LINKING_ONLY` before including zstd.h.
+
+ Advanced experimental APIs should never be used with a dynamically-linked
+ library. They are not "stable"; their definitions or signatures may change in
+ the future. Only static linking is allowed.
+*******************************************************************************/
+
+/*------ Version ------*/
+#define ZSTD_VERSION_MAJOR 1
+#define ZSTD_VERSION_MINOR 4
+#define ZSTD_VERSION_RELEASE 10
+#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
+
+/*! ZSTD_versionNumber() :
+ * Return runtime library version, the value is (MAJOR*100*100 + MINOR*100 + RELEASE). */
+ZSTDLIB_API unsigned ZSTD_versionNumber(void);
+
+#define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE
+#define ZSTD_QUOTE(str) #str
+#define ZSTD_EXPAND_AND_QUOTE(str) ZSTD_QUOTE(str)
+#define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION)
+
+/*! ZSTD_versionString() :
+ * Return runtime library version, like "1.4.5". Requires v1.3.0+. */
+ZSTDLIB_API const char* ZSTD_versionString(void);
+
+/* *************************************
+ * Default constant
+ ***************************************/
+#ifndef ZSTD_CLEVEL_DEFAULT
+# define ZSTD_CLEVEL_DEFAULT 3
+#endif
+
+/* *************************************
+ * Constants
+ ***************************************/
+
+/* All magic numbers are supposed to be read/written to/from files/memory using little-endian convention */
+#define ZSTD_MAGICNUMBER 0xFD2FB528 /* valid since v0.8.0 */
+#define ZSTD_MAGIC_DICTIONARY 0xEC30A437 /* valid since v0.7.0 */
+#define ZSTD_MAGIC_SKIPPABLE_START 0x184D2A50 /* all 16 values, from 0x184D2A50 to 0x184D2A5F, signal the beginning of a skippable frame */
+#define ZSTD_MAGIC_SKIPPABLE_MASK 0xFFFFFFF0
+
+#define ZSTD_BLOCKSIZELOG_MAX 17
+#define ZSTD_BLOCKSIZE_MAX (1<<ZSTD_BLOCKSIZELOG_MAX)
+
+
+
+/* *************************************
+* Simple API
+***************************************/
+/*! ZSTD_compress() :
+ * Compresses `src` content as a single zstd compressed frame into already allocated `dst`.
+ * Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
+ * @return : compressed size written into `dst` (<= `dstCapacity`),
+ * or an error code if it fails (which can be tested using ZSTD_isError()). */
+ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ int compressionLevel);
+
+/*! ZSTD_decompress() :
+ * `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
+ *  `dstCapacity` is an upper bound of the original size to regenerate.
+ *  If the user cannot infer a maximum upper bound, it's better to use streaming mode to decompress data.
+ * @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
+ * or an errorCode if it fails (which can be tested using ZSTD_isError()). */
+ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity,
+ const void* src, size_t compressedSize);
+
+/*! ZSTD_getFrameContentSize() : requires v1.3.0+
+ * `src` should point to the start of a ZSTD encoded frame.
+ * `srcSize` must be at least as large as the frame header.
+ * hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
+ * @return : - decompressed size of `src` frame content, if known
+ * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
+ * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
+ * note 1 : a 0 return value means the frame is valid but "empty".
+ * note 2 : decompressed size is an optional field, it may not be present, typically in streaming mode.
+ * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * Optionally, application can rely on some implicit limit,
+ * as ZSTD_decompress() only needs an upper bound of decompressed size.
+ *            (For example, data could necessarily be cut into blocks <= 16 KB).
+ *  note 3 : decompressed size is always present when compression is completed using single-pass functions,
+ *           such as ZSTD_compress(), ZSTD_compressCCtx(), ZSTD_compress_usingDict() or ZSTD_compress_usingCDict().
+ * note 4 : decompressed size can be very large (64-bits value),
+ * potentially larger than what local system can handle as a single memory segment.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified.
+ * Always ensure return value fits within application's authorized limits.
+ * Each application can set its own limits.
+ * note 6 : This function replaces ZSTD_getDecompressedSize() */
+#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
+#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
+ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
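+
+/*
+ * A hedged sketch of note 5 above: bounding an untrusted content size before
+ * allocating. The function name and the max_size parameter are assumptions
+ * made for this example.
+ */
+static inline int ZSTD_example_contentSizeOk(const void* src, size_t srcSize,
+                                             unsigned long long max_size)
+{
+    unsigned long long const size = ZSTD_getFrameContentSize(src, srcSize);
+
+    if (size == ZSTD_CONTENTSIZE_UNKNOWN || size == ZSTD_CONTENTSIZE_ERROR)
+        return 0; /* reject, or fall back to streaming decompression */
+    return size <= max_size;
+}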
+
+/*! ZSTD_getDecompressedSize() :
+ * NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize().
+ * Both functions work the same way, but ZSTD_getDecompressedSize() blends
+ * "empty", "unknown" and "error" results to the same return value (0),
+ * while ZSTD_getFrameContentSize() gives them separate return values.
+ * @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */
+ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
+
+/*! ZSTD_findFrameCompressedSize() :
+ * `src` should point to the start of a ZSTD frame or skippable frame.
+ * `srcSize` must be >= first frame size
+ * @return : the compressed size of the first frame starting at `src`,
+ * suitable to pass as `srcSize` to `ZSTD_decompress` or similar,
+ * or an error code if input is invalid */
+ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);
+
+
+/*====== Helper functions ======*/
+#define ZSTD_COMPRESSBOUND(srcSize) ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
+ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
+ZSTDLIB_API unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */
+ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */
+ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed */
+ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */
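+
+/*
+ * A minimal round-trip sketch with the simple API above. The function name
+ * and buffer parameters are assumptions; `scratch` should hold at least
+ * ZSTD_compressBound(srcSize) bytes for compression to always succeed.
+ */
+static inline size_t ZSTD_example_roundtrip(void* dst, size_t dstCapacity,
+                                            void* scratch, size_t scratchCapacity,
+                                            const void* src, size_t srcSize)
+{
+    size_t const cSize = ZSTD_compress(scratch, scratchCapacity,
+                                       src, srcSize, ZSTD_CLEVEL_DEFAULT);
+    if (ZSTD_isError(cSize))
+        return cSize; /* propagate the error code */
+    /* Decompress back into dst; cSize is the exact compressed size. */
+    return ZSTD_decompress(dst, dstCapacity, scratch, cSize);
+}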
+
+
+/* *************************************
+* Explicit context
+***************************************/
+/*= Compression context
+ * When compressing many times,
+ * it is recommended to allocate a context just once,
+ * and re-use it for each successive compression operation.
+ * This makes the workload friendlier on the system's memory.
+ * Note : re-using context is just a speed / resource optimization.
+ * It doesn't change the compression ratio, which remains identical.
+ * Note 2 : In multi-threaded environments,
+ * use one different context per thread for parallel execution.
+ */
+typedef struct ZSTD_CCtx_s ZSTD_CCtx;
+ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void);
+ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); /* accept NULL pointer */
+
+/*! ZSTD_compressCCtx() :
+ * Same as ZSTD_compress(), using an explicit ZSTD_CCtx.
+ * Important : in order to behave similarly to `ZSTD_compress()`,
+ * this function compresses at the requested compression level,
+ * __ignoring any other parameter__.
+ * If any advanced parameters were set using the advanced API,
+ * they will all be reset. Only `compressionLevel` remains.
+ */
+ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ int compressionLevel);
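+
+/*
+ * A hedged sketch of context re-use: one CCtx allocated up front, then used
+ * for several independent frames. The function name and parameters are
+ * assumptions made for this example.
+ */
+static inline size_t ZSTD_example_compressMany(void* dst, size_t dstCapacity,
+                                               const void* const srcs[],
+                                               const size_t srcSizes[],
+                                               size_t n, int level)
+{
+    ZSTD_CCtx* const cctx = ZSTD_createCCtx(); /* allocate only once */
+    size_t pos = 0;
+    size_t i;
+
+    if (cctx == NULL)
+        return 0;
+    for (i = 0; i < n; i++) {
+        /* Each call emits an independent frame; the context is re-used. */
+        size_t const ret = ZSTD_compressCCtx(cctx, (char*)dst + pos,
+                                             dstCapacity - pos,
+                                             srcs[i], srcSizes[i], level);
+        if (ZSTD_isError(ret)) { pos = ret; break; } /* propagate error */
+        pos += ret;
+    }
+    ZSTD_freeCCtx(cctx);
+    return pos;
+}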
+
+/*= Decompression context
+ * When decompressing many times,
+ * it is recommended to allocate a context only once,
+ * and re-use it for each successive decompression operation.
+ * This makes the workload friendlier on the system's memory.
+ * Use one context per thread for parallel execution. */
+typedef struct ZSTD_DCtx_s ZSTD_DCtx;
+ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx(void);
+ZSTDLIB_API size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx); /* accept NULL pointer */
+
+/*! ZSTD_decompressDCtx() :
+ * Same as ZSTD_decompress(),
+ * requires an allocated ZSTD_DCtx.
+ * Compatible with sticky parameters.
+ */
+ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
+
+
+/* *************************************
+* Advanced compression API
+***************************************/
+
+/* API design :
+ * Parameters are pushed one by one into an existing context,
+ * using ZSTD_CCtx_set*() functions.
+ * Pushed parameters are sticky : they are valid for next compressed frame, and any subsequent frame.
+ * "sticky" parameters are applicable to `ZSTD_compress2()` and `ZSTD_compressStream*()` !
+ * __They do not apply to "simple" one-shot variants such as ZSTD_compressCCtx()__ .
+ *
+ * It's possible to reset all parameters to "default" using ZSTD_CCtx_reset().
+ *
+ * This API supersedes all other "advanced" API entry points in the experimental section.
+ * In the future, we expect to remove from experimental API entry points which are redundant with this API.
+ */
+
+
+/* Compression strategies, listed from fastest to strongest */
+typedef enum { ZSTD_fast=1,
+ ZSTD_dfast=2,
+ ZSTD_greedy=3,
+ ZSTD_lazy=4,
+ ZSTD_lazy2=5,
+ ZSTD_btlazy2=6,
+ ZSTD_btopt=7,
+ ZSTD_btultra=8,
+ ZSTD_btultra2=9
+ /* note : new strategies _might_ be added in the future.
+ Only the order (from fast to strong) is guaranteed */
+} ZSTD_strategy;
+
+
+typedef enum {
+
+ /* compression parameters
+ * Note: When compressing with a ZSTD_CDict these parameters are superseded
+ * by the parameters used to construct the ZSTD_CDict.
+ * See ZSTD_CCtx_refCDict() for more info (superseded-by-cdict). */
+ ZSTD_c_compressionLevel=100, /* Set compression parameters according to pre-defined cLevel table.
+ * Note that exact compression parameters are dynamically determined,
+ * depending on both compression level and srcSize (when known).
+ * Default level is ZSTD_CLEVEL_DEFAULT==3.
+ * Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT.
+ * Note 1 : it's possible to pass a negative compression level.
+ * Note 2 : setting a level does not automatically set all other compression parameters
+ * to default. Setting this will however eventually dynamically impact the compression
+ * parameters which have not been manually set. The manually set
+ * ones will 'stick'. */
+ /* Advanced compression parameters :
+ * It's possible to pin down compression parameters to some specific values.
+ * In which case, these values are no longer dynamically selected by the compressor */
+ ZSTD_c_windowLog=101, /* Maximum allowed back-reference distance, expressed as power of 2.
+ * This will set a memory budget for streaming decompression,
+ * with larger values requiring more memory
+ * and typically compressing more.
+ * Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX.
+ * Special: value 0 means "use default windowLog".
+ * Note: Using a windowLog greater than ZSTD_WINDOWLOG_LIMIT_DEFAULT
+ * requires explicitly allowing such size at streaming decompression stage. */
+ ZSTD_c_hashLog=102, /* Size of the initial probe table, as a power of 2.
+ * Resulting memory usage is (1 << (hashLog+2)).
+ * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.
+ * Larger tables improve compression ratio of strategies <= dFast,
+ * and improve speed of strategies > dFast.
+ * Special: value 0 means "use default hashLog". */
+ ZSTD_c_chainLog=103, /* Size of the multi-probe search table, as a power of 2.
+ * Resulting memory usage is (1 << (chainLog+2)).
+ * Must be clamped between ZSTD_CHAINLOG_MIN and ZSTD_CHAINLOG_MAX.
+ * Larger tables result in better and slower compression.
+ * This parameter is useless for "fast" strategy.
+ * It's still useful when using "dfast" strategy,
+ * in which case it defines a secondary probe table.
+ * Special: value 0 means "use default chainLog". */
+ ZSTD_c_searchLog=104, /* Number of search attempts, as a power of 2.
+ * More attempts result in better and slower compression.
+ * This parameter is useless for "fast" and "dFast" strategies.
+ * Special: value 0 means "use default searchLog". */
+ ZSTD_c_minMatch=105, /* Minimum size of searched matches.
+ * Note that Zstandard can still find matches of smaller size,
+ * it just tweaks its search algorithm to look for this size and larger.
+ * Larger values increase compression and decompression speed, but decrease ratio.
+ * Must be clamped between ZSTD_MINMATCH_MIN and ZSTD_MINMATCH_MAX.
+ * Note that currently, for all strategies < btopt, the effective minimum is 4;
+ * for all strategies > fast, the effective maximum is 6.
+ * Special: value 0 means "use default minMatchLength". */
+ ZSTD_c_targetLength=106, /* Impact of this field depends on strategy.
+ * For strategies btopt, btultra & btultra2:
+ * Length of Match considered "good enough" to stop search.
+ * Larger values make compression stronger, and slower.
+ * For strategy fast:
+ * Distance between match sampling.
+ * Larger values make compression faster, and weaker.
+ * Special: value 0 means "use default targetLength". */
+ ZSTD_c_strategy=107, /* See ZSTD_strategy enum definition.
+ * The higher the value of selected strategy, the more complex it is,
+ * resulting in stronger and slower compression.
+ * Special: value 0 means "use default strategy". */
+
+ /* LDM mode parameters */
+ ZSTD_c_enableLongDistanceMatching=160, /* Enable long distance matching.
+ * This parameter is designed to improve compression ratio
+ * for large inputs, by finding large matches at long distance.
+ * It increases memory usage and window size.
+ * Note: enabling this parameter increases default ZSTD_c_windowLog to 128 MB
+ * except when expressly set to a different value.
+ * Note: will be enabled by default if ZSTD_c_windowLog >= 128 MB and
+ * compression strategy >= ZSTD_btopt (== compression level 16+) */
+ ZSTD_c_ldmHashLog=161, /* Size of the table for long distance matching, as a power of 2.
+ * Larger values increase memory usage and compression ratio,
+ * but decrease compression speed.
+ * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX
+ * default: windowlog - 7.
+ * Special: value 0 means "automatically determine hashlog". */
+ ZSTD_c_ldmMinMatch=162, /* Minimum match size for long distance matcher.
+ * Larger/too small values usually decrease compression ratio.
+ * Must be clamped between ZSTD_LDM_MINMATCH_MIN and ZSTD_LDM_MINMATCH_MAX.
+ * Special: value 0 means "use default value" (default: 64). */
+ ZSTD_c_ldmBucketSizeLog=163, /* Log size of each bucket in the LDM hash table for collision resolution.
+ * Larger values improve collision resolution but decrease compression speed.
+ * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX.
+ * Special: value 0 means "use default value" (default: 3). */
+ ZSTD_c_ldmHashRateLog=164, /* Frequency of inserting/looking up entries into the LDM hash table.
+ * Must be clamped between 0 and (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN).
+ * Default is MAX(0, (windowLog - ldmHashLog)), optimizing hash table usage.
+ * Larger values improve compression speed.
+ * Deviating far from default value will likely result in a compression ratio decrease.
+ * Special: value 0 means "automatically determine hashRateLog". */
+
+ /* frame parameters */
+ ZSTD_c_contentSizeFlag=200, /* Content size will be written into frame header _whenever known_ (default:1)
+ * Content size must be known at the beginning of compression.
+ * This is automatically the case when using ZSTD_compress2(),
+ * For streaming scenarios, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */
+ ZSTD_c_checksumFlag=201, /* A 32-bits checksum of content is written at end of frame (default:0) */
+ ZSTD_c_dictIDFlag=202, /* When applicable, dictionary's ID is written into frame header (default:1) */
+
+ /* multi-threading parameters */
+ /* These parameters are only active if multi-threading is enabled (compiled with build macro ZSTD_MULTITHREAD).
+ * Otherwise, trying to set any other value than default (0) will be a no-op and return an error.
+ * In a situation where it's unknown if the linked library supports multi-threading or not,
+ * setting ZSTD_c_nbWorkers to any value >= 1 and consulting the return value provides a quick way to check this property.
+ */
+ ZSTD_c_nbWorkers=400, /* Select how many threads will be spawned to compress in parallel.
+ * When nbWorkers >= 1, triggers asynchronous mode when invoking ZSTD_compressStream*() :
+ * ZSTD_compressStream*() consumes input and flush output if possible, but immediately gives back control to caller,
+ * while compression is performed in parallel, within worker thread(s).
+ * (note : a strong exception to this rule is when first invocation of ZSTD_compressStream2() sets ZSTD_e_end :
+ * in which case, ZSTD_compressStream2() delegates to ZSTD_compress2(), which is always a blocking call).
+ * More workers improve speed, but also increase memory usage.
+ * Default value is `0`, aka "single-threaded mode" : no worker is spawned,
+ * compression is performed inside the caller's thread, and all invocations are blocking */
+ ZSTD_c_jobSize=401, /* Size of a compression job. This value is enforced only when nbWorkers >= 1.
+ * Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads.
+ * 0 means default, which is dynamically determined based on compression parameters.
+ * Job size must be a minimum of overlap size, or 1 MB, whichever is largest.
+ * The minimum size is automatically and transparently enforced. */
+ ZSTD_c_overlapLog=402, /* Control the overlap size, as a fraction of window size.
+ * The overlap size is an amount of data reloaded from previous job at the beginning of a new job.
+ * It helps preserve compression ratio, while each job is compressed in parallel.
+ * This value is enforced only when nbWorkers >= 1.
+ * Larger values increase compression ratio, but decrease speed.
+ * Possible values range from 0 to 9 :
+ * - 0 means "default" : value will be determined by the library, depending on strategy
+ * - 1 means "no overlap"
+ * - 9 means "full overlap", using a full window size.
+ * Each intermediate rank increases/decreases load size by a factor 2 :
+ * 9: full window; 8: w/2; 7: w/4; 6: w/8; 5: w/16; 4: w/32; 3: w/64; 2: w/128; 1: no overlap; 0: default
+ * default value varies between 6 and 9, depending on strategy */
+
+ /* note : additional experimental parameters are also available
+ * within the experimental section of the API.
+ * At the time of this writing, they include :
+ * ZSTD_c_rsyncable
+ * ZSTD_c_format
+ * ZSTD_c_forceMaxWindow
+ * ZSTD_c_forceAttachDict
+ * ZSTD_c_literalCompressionMode
+ * ZSTD_c_targetCBlockSize
+ * ZSTD_c_srcSizeHint
+ * ZSTD_c_enableDedicatedDictSearch
+ * ZSTD_c_stableInBuffer
+ * ZSTD_c_stableOutBuffer
+ * ZSTD_c_blockDelimiters
+ * ZSTD_c_validateSequences
+ * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
+ * note : never ever use experimentalParam? names directly;
+ * also, the enums values themselves are unstable and can still change.
+ */
+ ZSTD_c_experimentalParam1=500,
+ ZSTD_c_experimentalParam2=10,
+ ZSTD_c_experimentalParam3=1000,
+ ZSTD_c_experimentalParam4=1001,
+ ZSTD_c_experimentalParam5=1002,
+ ZSTD_c_experimentalParam6=1003,
+ ZSTD_c_experimentalParam7=1004,
+ ZSTD_c_experimentalParam8=1005,
+ ZSTD_c_experimentalParam9=1006,
+ ZSTD_c_experimentalParam10=1007,
+ ZSTD_c_experimentalParam11=1008,
+ ZSTD_c_experimentalParam12=1009
+} ZSTD_cParameter;
+
+typedef struct {
+ size_t error;
+ int lowerBound;
+ int upperBound;
+} ZSTD_bounds;
+
+/*! ZSTD_cParam_getBounds() :
+ * All parameters must belong to an interval with lower and upper bounds,
+ * otherwise they will either trigger an error or be automatically clamped.
+ * @return : a structure, ZSTD_bounds, which contains
+ * - an error status field, which must be tested using ZSTD_isError()
+ * - lower and upper bounds, both inclusive
+ */
+ZSTDLIB_API ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter cParam);
+
+/*! ZSTD_CCtx_setParameter() :
+ * Set one compression parameter, selected by enum ZSTD_cParameter.
+ * All parameters have valid bounds. Bounds can be queried using ZSTD_cParam_getBounds().
+ * Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).
+ * Setting a parameter is generally only possible during frame initialization (before starting compression).
+ * Exception : when using multi-threading mode (nbWorkers >= 1),
+ * the following parameters can be updated _during_ compression (within same frame):
+ * => compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy.
+ * new parameters will be active for next job only (after a flush()).
+ * @return : an error code (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value);
+
+/*! ZSTD_CCtx_setPledgedSrcSize() :
+ * Total input data size to be compressed as a single frame.
+ * Value will be written in frame header, unless explicitly forbidden using ZSTD_c_contentSizeFlag.
+ * This value will also be checked at end of frame, triggering an error if not respected.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Note 1 : pledgedSrcSize==0 actually means zero, aka an empty frame.
+ * In order to mean "unknown content size", pass constant ZSTD_CONTENTSIZE_UNKNOWN.
+ * ZSTD_CONTENTSIZE_UNKNOWN is default value for any new frame.
+ * Note 2 : pledgedSrcSize is only valid once, for the next frame.
+ * It's discarded at the end of the frame, and replaced by ZSTD_CONTENTSIZE_UNKNOWN.
+ * Note 3 : Whenever all input data is provided and consumed in a single round,
+ * for example with ZSTD_compress2(),
+ * or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end),
+ * this value is automatically overridden by srcSize instead.
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize);
+
+typedef enum {
+ ZSTD_reset_session_only = 1,
+ ZSTD_reset_parameters = 2,
+ ZSTD_reset_session_and_parameters = 3
+} ZSTD_ResetDirective;
+
+/*! ZSTD_CCtx_reset() :
+ * There are 2 different things that can be reset, independently or jointly :
+ * - The session : will stop compressing current frame, and make CCtx ready to start a new one.
+ * Useful after an error, or to interrupt any ongoing compression.
+ * Any internal data not yet flushed is cancelled.
+ * Compression parameters and dictionary remain unchanged.
+ * They will be used to compress next frame.
+ * Resetting session never fails.
+ * - The parameters : changes all parameters back to "default".
+ * This removes any reference to any dictionary too.
+ * Parameters can only be changed between 2 sessions (i.e. when no compression is ongoing);
+ * otherwise the reset fails, and the function returns an error value (which can be tested using ZSTD_isError())
+ * - Both : similar to resetting the session, followed by resetting parameters.
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset);
+
+/*! ZSTD_compress2() :
+ * Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.
+ * ZSTD_compress2() always starts a new frame.
+ * Should cctx hold data from a previously unfinished frame, everything about it is forgotten.
+ * - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
+ * - The function is always blocking, returns when compression is completed.
+ * Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
+ * @return : compressed size written into `dst` (<= `dstCapacity`),
+ * or an error code if it fails (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_compress2( ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
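+
+/*
+ * A minimal sketch of the advanced flow described above: push sticky
+ * parameters, then call ZSTD_compress2(). The parameter values chosen here
+ * are assumptions made for this example; error checks on the setters are
+ * elided for brevity.
+ */
+static inline size_t ZSTD_example_compressAdvanced(ZSTD_CCtx* cctx,
+                                                   void* dst, size_t dstCapacity,
+                                                   const void* src, size_t srcSize)
+{
+    /* Sticky: these remain set for any subsequent frame on this cctx. */
+    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
+    ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
+    return ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
+}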
+
+
+/* *************************************
+* Advanced decompression API
+***************************************/
+
+/* The advanced API pushes parameters one by one into an existing DCtx context.
+ * Parameters are sticky, and remain valid for all following frames
+ * using the same DCtx context.
+ * It's possible to reset parameters to default values using ZSTD_DCtx_reset().
+ * Note : This API is compatible with existing ZSTD_decompressDCtx() and ZSTD_decompressStream().
+ * Therefore, no new decompression function is necessary.
+ */
+
+typedef enum {
+
+ ZSTD_d_windowLogMax=100, /* Select a size limit (in power of 2) beyond which
+ * the streaming API will refuse to allocate memory buffer
+ * in order to protect the host from unreasonable memory requirements.
+ * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.
+ * By default, a decompression context accepts window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT).
+ * Special: value 0 means "use default maximum windowLog". */
+
+ /* note : additional experimental parameters are also available
+ * within the experimental section of the API.
+ * At the time of this writing, they include :
+ * ZSTD_d_format
+ * ZSTD_d_stableOutBuffer
+ * ZSTD_d_forceIgnoreChecksum
+ * ZSTD_d_refMultipleDDicts
+ * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
+ * note : never ever use experimentalParam? names directly
+ */
+ ZSTD_d_experimentalParam1=1000,
+ ZSTD_d_experimentalParam2=1001,
+ ZSTD_d_experimentalParam3=1002,
+ ZSTD_d_experimentalParam4=1003
+
+} ZSTD_dParameter;
+
+/*! ZSTD_dParam_getBounds() :
+ * All parameters must belong to an interval with lower and upper bounds,
+ * otherwise they will either trigger an error or be automatically clamped.
+ * @return : a structure, ZSTD_bounds, which contains
+ * - an error status field, which must be tested using ZSTD_isError()
+ * - both lower and upper bounds, inclusive
+ */
+ZSTDLIB_API ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam);
+
+/*! ZSTD_DCtx_setParameter() :
+ * Set one decompression parameter, selected by enum ZSTD_dParameter.
+ * All parameters have valid bounds. Bounds can be queried using ZSTD_dParam_getBounds().
+ * Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).
+ * Setting a parameter is only possible during frame initialization (before starting decompression).
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int value);
+
+/*! ZSTD_DCtx_reset() :
+ * Return a DCtx to clean state.
+ * Session and parameters can be reset jointly or separately.
+ * Parameters can only be reset when no active frame is being decompressed.
+ * @return : 0, or an error code, which can be tested with ZSTD_isError()
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset);
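+
+/*
+ * A hedged sketch of capping decoder memory with ZSTD_d_windowLogMax; the
+ * function name and the value 27 (a 128 MB window) are assumptions made for
+ * this example.
+ */
+static inline size_t ZSTD_example_limitWindow(ZSTD_DCtx* dctx)
+{
+    /* Streaming decompression will refuse frames needing more than 1<<27 bytes. */
+    return ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 27);
+}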
+
+
+/* **************************
+* Streaming
+****************************/
+
+typedef struct ZSTD_inBuffer_s {
+ const void* src; /*< start of input buffer */
+ size_t size; /*< size of input buffer */
+ size_t pos; /*< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size */
+} ZSTD_inBuffer;
+
+typedef struct ZSTD_outBuffer_s {
+ void* dst; /*< start of output buffer */
+ size_t size; /*< size of output buffer */
+ size_t pos; /*< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size */
+} ZSTD_outBuffer;
+
+
+
+/*-***********************************************************************
+* Streaming compression - HowTo
+*
+* A ZSTD_CStream object is required to track streaming operation.
+* Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources.
+* ZSTD_CStream objects can be reused multiple times on consecutive compression operations.
+* It is recommended to re-use ZSTD_CStream since it will play nicer with the system's memory, by re-using already allocated memory.
+*
+* For parallel execution, use one separate ZSTD_CStream per thread.
+*
+* note : since v1.3.0, ZSTD_CStream and ZSTD_CCtx are the same thing.
+*
+* Parameters are sticky : when starting a new compression on the same context,
+* it will re-use the same sticky parameters as the previous compression session.
+* When in doubt, it's recommended to fully initialize the context before usage.
+* Use ZSTD_CCtx_reset() to reset the context and ZSTD_CCtx_setParameter(),
+* ZSTD_CCtx_setPledgedSrcSize(), or ZSTD_CCtx_loadDictionary() and friends to
+* set more specific parameters, the pledged source size, or load a dictionary.
+*
+* Use ZSTD_compressStream2() with ZSTD_e_continue as many times as necessary to
+* consume input stream. The function will automatically update both `pos`
+* fields within `input` and `output`.
+* Note that the function may not consume the entire input, for example, because
+* the output buffer is already full, in which case `input.pos < input.size`.
+* The caller must check if input has been entirely consumed.
+* If not, the caller must make some room to receive more compressed data,
+* and then present the remaining input data again.
+* note: ZSTD_e_continue is guaranteed to make some forward progress when called,
+* but doesn't guarantee maximal forward progress. This is especially relevant
+* when compressing with multiple threads. The call won't block if it can
+* consume some input, but if it can't it will wait for some, but not all,
+* output to be flushed.
+* @return : provides a minimum amount of data remaining to be flushed from internal buffers
+* or an error code, which can be tested using ZSTD_isError().
+*
+* At any moment, it's possible to flush whatever data might remain stuck within internal buffer,
+* using ZSTD_compressStream2() with ZSTD_e_flush. `output->pos` will be updated.
+* Note that, if `output->size` is too small, a single invocation with ZSTD_e_flush might not be enough (return code > 0).
+* In which case, make some room to receive more compressed data, and call again ZSTD_compressStream2() with ZSTD_e_flush.
+* You must continue calling ZSTD_compressStream2() with ZSTD_e_flush until it returns 0, at which point you can change the
+* operation.
+* note: ZSTD_e_flush will flush as much output as possible, meaning when compressing with multiple threads, it will
+* block until the flush is complete or the output buffer is full.
+* @return : 0 if internal buffers are entirely flushed,
+* >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
+* or an error code, which can be tested using ZSTD_isError().
+*
+* Calling ZSTD_compressStream2() with ZSTD_e_end instructs it to finish a frame.
+* It will perform a flush and write frame epilogue.
+* The epilogue is required for decoders to consider a frame completed.
+* The flush operation is the same, and follows the same rules as calling ZSTD_compressStream2() with ZSTD_e_flush.
+* You must continue calling ZSTD_compressStream2() with ZSTD_e_end until it returns 0, at which point you are free to
+* start a new frame.
+* note: ZSTD_e_end will flush as much output as possible, meaning when compressing with multiple threads, it will
+* block until the flush is complete or the output buffer is full.
+* @return : 0 if frame fully completed and fully flushed,
+* >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
+* or an error code, which can be tested using ZSTD_isError().
+*
+* *******************************************************************/
+
+typedef ZSTD_CCtx ZSTD_CStream; /*< CCtx and CStream are now effectively same object (>= v1.3.0) */
+ /* Continue to distinguish them for compatibility with older versions <= v1.2.0 */
+/*===== ZSTD_CStream management functions =====*/
+ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void);
+ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs); /* accept NULL pointer */
+
+/*===== Streaming compression functions =====*/
+typedef enum {
+ ZSTD_e_continue=0, /* collect more data, encoder decides when to output compressed result, for optimal compression ratio */
+ ZSTD_e_flush=1, /* flush any data provided so far,
+ * it creates (at least) one new block, that can be decoded immediately on reception;
+ * frame will continue: any future data can still reference previously compressed data, improving compression.
+ * note : multithreaded compression will block to flush as much output as possible. */
+ ZSTD_e_end=2 /* flush any remaining data _and_ close current frame.
+ * note that frame is only closed after compressed data is fully flushed (return value == 0).
+ * After that point, any additional data starts a new frame.
+ * note : each frame is independent (does not reference any content from previous frame).
+ * note : multithreaded compression will block to flush as much output as possible. */
+} ZSTD_EndDirective;
+
+/*! ZSTD_compressStream2() :
+ * Behaves about the same as ZSTD_compressStream, with additional control on end directive.
+ * - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
+ * - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode)
+ * - output->pos must be <= dstCapacity, input->pos must be <= srcSize
+ * - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
+ * - endOp must be a valid directive
+ * - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.
+ * - When nbWorkers>=1, function is non-blocking : it copies a portion of input, distributes jobs to internal worker threads, flushes to output whatever is available,
+ * and then immediately returns, just indicating that there is some data remaining to be flushed.
+ * The function nonetheless guarantees forward progress : it will return only after it reads or writes at least 1 byte.
+ * - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking.
+ * - @return provides a minimum amount of data remaining to be flushed from internal buffers
+ * or an error code, which can be tested using ZSTD_isError().
+ * if @return != 0, flush is not fully completed, there is still some data left within internal buffers.
+ * This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.
+ * For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.
+ * - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),
+ * only ZSTD_e_end or ZSTD_e_flush operations are allowed.
+ * Before starting a new compression job, or changing compression parameters,
+ * it is required to fully flush internal buffers.
+ */
+ZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
+ ZSTD_outBuffer* output,
+ ZSTD_inBuffer* input,
+ ZSTD_EndDirective endOp);
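+
+/*
+ * A hedged sketch of the single-frame streaming loop from the HowTo above,
+ * using ZSTD_e_end. The function name and parameters are assumptions made
+ * for this example, and dst is assumed large enough for the whole frame.
+ */
+static inline size_t ZSTD_example_streamCompress(ZSTD_CCtx* cctx,
+                                                 void* dst, size_t dstCapacity,
+                                                 const void* src, size_t srcSize)
+{
+    ZSTD_inBuffer input = { src, srcSize, 0 };
+    ZSTD_outBuffer output = { dst, dstCapacity, 0 };
+    size_t remaining;
+
+    do {
+        /* ZSTD_e_end flushes everything and writes the frame epilogue. */
+        remaining = ZSTD_compressStream2(cctx, &output, &input, ZSTD_e_end);
+        if (ZSTD_isError(remaining))
+            return remaining;
+        if (remaining != 0 && output.pos == output.size)
+            break; /* out of space: a real caller would drain output here */
+    } while (remaining != 0);
+    return output.pos; /* compressed bytes written into dst */
+}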
+
+
+/* These buffer sizes are softly recommended.
+ * They are not required : ZSTD_compressStream*() happily accepts any buffer size, for both input and output.
+ * Respecting the recommended size just makes it a bit easier for ZSTD_compressStream*(),
+ * reducing the amount of memory shuffling and buffering, resulting in minor performance savings.
+ *
+ * However, note that these recommendations are from the perspective of a C caller program.
+ * If the streaming interface is invoked from some other language,
+ * especially managed ones such as Java or Go, through a foreign function interface such as jni or cgo,
+ * a major performance rule is to reduce crossing such interface to an absolute minimum.
+ * It's not rare for more time to be spent crossing the interface than on compression itself.
+ * In such cases, prefer using buffers as large as practical,
+ * for both input and output, to reduce the number of round trips.
+ */
+ZSTDLIB_API size_t ZSTD_CStreamInSize(void); /*< recommended size for input buffer */
+ZSTDLIB_API size_t ZSTD_CStreamOutSize(void);    /*< recommended size for output buffer. Guaranteed to successfully flush at least one complete compressed block. */
+
+
+/* *****************************************************************************
+ * This following is a legacy streaming API.
+ * It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2().
+ * It is redundant, but remains fully supported.
+ * Advanced parameters and dictionary compression can only be used through the
+ * new API.
+ ******************************************************************************/
+
+/*!
+ * Equivalent to:
+ *
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
+ * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
+ */
+ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
+/*!
+ * Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue).
+ * NOTE: The return value is different. ZSTD_compressStream() returns a hint for
+ * the next read size (if non-zero and not an error). ZSTD_compressStream2()
+ * returns the minimum number of bytes left to flush (if non-zero and not an error).
+ */
+ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush). */
+ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
+/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end). */
+ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
+
+
+/*-***************************************************************************
+* Streaming decompression - HowTo
+*
+* A ZSTD_DStream object is required to track streaming operations.
+* Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
+* ZSTD_DStream objects can be re-used multiple times.
+*
+* Use ZSTD_initDStream() to start a new decompression operation.
+* @return : recommended first input size
+* Alternatively, use advanced API to set specific properties.
+*
+* Use ZSTD_decompressStream() repetitively to consume your input.
+* The function will update both `pos` fields.
+* If `input.pos < input.size`, some input has not been consumed.
+* It's up to the caller to present the remaining data again.
+* The function tries to flush all data decoded immediately, respecting output buffer size.
+* If `output.pos < output.size`, decoder has flushed everything it could.
+* But if `output.pos == output.size`, there might be some data left within internal buffers.
+* In which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer.
+* Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX.
+* @return : 0 when a frame is completely decoded and fully flushed,
+* or an error code, which can be tested using ZSTD_isError(),
+* or any other value > 0, which means there is still some decoding or flushing to do to complete current frame :
+* the return value is a suggested next input size (just a hint for better latency)
+* that will never request more than the remaining frame size.
+* *******************************************************************************/
+
+typedef ZSTD_DCtx ZSTD_DStream; /*< DCtx and DStream are now effectively the same object (>= v1.3.0) */
+ /* For compatibility with versions <= v1.2.0, prefer differentiating them. */
+/*===== ZSTD_DStream management functions =====*/
+ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void);
+ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds); /* accept NULL pointer */
+
+/*===== Streaming decompression functions =====*/
+
+/* This function is redundant with the advanced API and equivalent to:
+ *
+ * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ * ZSTD_DCtx_refDDict(zds, NULL);
+ */
+ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds);
+
+ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+
+ZSTDLIB_API size_t ZSTD_DStreamInSize(void); /*!< recommended size for input buffer */
+ZSTDLIB_API size_t ZSTD_DStreamOutSize(void);   /*!< recommended size for output buffer. Guaranteed to successfully flush at least one complete block in all circumstances. */
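+
+/* Illustrative sketch : a minimal streaming decompression loop.
+ * Buffers and I/O helpers are hypothetical placeholders.
+ *
+ *   ZSTD_DStream* const dstream = ZSTD_createDStream();
+ *   size_t readBytes;
+ *   ZSTD_initDStream(dstream);
+ *   while ((readBytes = read_input(inBuff, ZSTD_DStreamInSize())) != 0) {
+ *       ZSTD_inBuffer input = { inBuff, readBytes, 0 };
+ *       while (input.pos < input.size) {
+ *           ZSTD_outBuffer output = { outBuff, ZSTD_DStreamOutSize(), 0 };
+ *           size_t const ret = ZSTD_decompressStream(dstream, &output, &input);
+ *           if (ZSTD_isError(ret)) goto error;   // hypothetical error path
+ *           write_output(outBuff, output.pos);   // ret == 0 : frame complete
+ *       }
+ *   }
+ *   ZSTD_freeDStream(dstream);
+ */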
+
+
+/* ************************
+* Simple dictionary API
+***************************/
+/*! ZSTD_compress_usingDict() :
+ * Compression at an explicit compression level using a Dictionary.
+ * A dictionary can be any arbitrary data segment (also called a prefix),
+ * or a buffer with specified information (see dictBuilder/zdict.h).
+ * Note : This function loads the dictionary, resulting in significant startup delay.
+ * It's intended for a dictionary used only once.
+ * Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */
+ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ int compressionLevel);
+
+/*! ZSTD_decompress_usingDict() :
+ * Decompression using a known Dictionary.
+ * Dictionary must be identical to the one used during compression.
+ * Note : This function loads the dictionary, resulting in significant startup delay.
+ * It's intended for a dictionary used only once.
+ * Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */
+ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize);
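+
+/* Illustrative sketch : a one-shot roundtrip with a shared dictionary buffer.
+ * dictBuf/dictLen and the destination buffers are hypothetical placeholders.
+ *
+ *   size_t const cSize = ZSTD_compress_usingDict(cctx, dst, dstCapacity,
+ *                                                src, srcSize,
+ *                                                dictBuf, dictLen, 3);
+ *   size_t const dSize = ZSTD_decompress_usingDict(dctx, out, outCapacity,
+ *                                                  dst, cSize,
+ *                                                  dictBuf, dictLen);
+ * Both calls must reference the exact same dictionary content.
+ */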
+
+
+/* *********************************
+ * Bulk processing dictionary API
+ **********************************/
+typedef struct ZSTD_CDict_s ZSTD_CDict;
+
+/*! ZSTD_createCDict() :
+ * When compressing multiple messages or blocks using the same dictionary,
+ * it's recommended to digest the dictionary only once, since it's a costly operation.
+ * ZSTD_createCDict() will create a state from digesting a dictionary.
+ * The resulting state can be used for future compression operations with very limited startup cost.
+ * ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
+ * @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict.
+ * Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content.
+ * Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer,
+ * in which case the only thing that it transports is the @compressionLevel.
+ * This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively,
+ * which expects a ZSTD_CDict parameter for any data, including data without a known dictionary. */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
+ int compressionLevel);
+
+/*! ZSTD_freeCDict() :
+ * Function frees memory allocated by ZSTD_createCDict().
+ * If a NULL pointer is passed, no operation is performed. */
+ZSTDLIB_API size_t ZSTD_freeCDict(ZSTD_CDict* CDict);
+
+/*! ZSTD_compress_usingCDict() :
+ * Compression using a digested Dictionary.
+ * Recommended when same dictionary is used multiple times.
+ * Note : compression level is _decided at dictionary creation time_,
+ * and frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no) */
+ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict);
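+
+/* Illustrative sketch : digest the dictionary once, then compress many
+ * messages. Array names are hypothetical placeholders.
+ *
+ *   ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictLen, 3);
+ *   for (i = 0; i < nbMessages; i++) {
+ *       cSizes[i] = ZSTD_compress_usingCDict(cctx, dsts[i], dstCaps[i],
+ *                                            srcs[i], srcSizes[i], cdict);
+ *   }
+ *   ZSTD_freeCDict(cdict);
+ */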
+
+
+typedef struct ZSTD_DDict_s ZSTD_DDict;
+
+/*! ZSTD_createDDict() :
+ * Create a digested dictionary, ready to start decompression operation without startup delay.
+ * dictBuffer can be released after DDict creation, as its content is copied inside DDict. */
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);
+
+/*! ZSTD_freeDDict() :
+ * Function frees memory allocated with ZSTD_createDDict().
+ * If a NULL pointer is passed, no operation is performed. */
+ZSTDLIB_API size_t ZSTD_freeDDict(ZSTD_DDict* ddict);
+
+/*! ZSTD_decompress_usingDDict() :
+ * Decompression using a digested Dictionary.
+ * Recommended when same dictionary is used multiple times. */
+ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_DDict* ddict);
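+
+/* Illustrative sketch : the mirror pattern on the decompression side.
+ * Array names are hypothetical placeholders.
+ *
+ *   ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuf, dictLen);
+ *   for (i = 0; i < nbMessages; i++) {
+ *       dSizes[i] = ZSTD_decompress_usingDDict(dctx, outs[i], outCaps[i],
+ *                                              dsts[i], cSizes[i], ddict);
+ *   }
+ *   ZSTD_freeDDict(ddict);
+ */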
+
+
+/* ******************************
+ * Dictionary helper functions
+ *******************************/
+
+/*! ZSTD_getDictID_fromDict() :
+ * Provides the dictID stored within dictionary.
+ * if @return == 0, the dictionary is not conformant with Zstandard specification.
+ * It can still be loaded, but as a content-only dictionary. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
+
+/*! ZSTD_getDictID_fromDDict() :
+ * Provides the dictID of the dictionary loaded into `ddict`.
+ * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
+ * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
+
+/*! ZSTD_getDictID_fromFrame() :
+ * Provides the dictID required to decompress the frame stored within `src`.
+ * If @return == 0, the dictID could not be decoded.
+ * This could be for one of the following reasons :
+ * - The frame does not require a dictionary to be decoded (most common case).
+ * - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is hidden information.
+ * Note : this use case also happens when using a non-conformant dictionary.
+ * - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
+ * - This is not a Zstandard frame.
+ * To identify the exact failure cause, use ZSTD_getFrameHeader(), which will provide a more precise error code. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
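+
+/* Illustrative sketch : route an incoming frame to the matching digested
+ * dictionary by comparing dictIDs. `ddict` is a hypothetical placeholder.
+ *
+ *   unsigned const id = ZSTD_getDictID_fromFrame(frame, frameSize);
+ *   if (id == 0) {
+ *       // no dictID : the frame most likely needs no dictionary
+ *   } else if (id == ZSTD_getDictID_fromDDict(ddict)) {
+ *       ZSTD_decompress_usingDDict(dctx, out, outCapacity, frame, frameSize, ddict);
+ *   }
+ */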
+
+
+/* *****************************************************************************
+ * Advanced dictionary and prefix API
+ *
+ * This API allows dictionaries to be used with ZSTD_compress2(),
+ * ZSTD_compressStream2(), and ZSTD_decompress(). Dictionaries are sticky, and
+ * are only cleared when the context is reset with ZSTD_reset_parameters or
+ * ZSTD_reset_session_and_parameters. Prefixes are single-use.
+ ******************************************************************************/
+
+
+/*! ZSTD_CCtx_loadDictionary() :
+ * Create an internal CDict from `dict` buffer.
+ * Decompression will have to use same dictionary.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary,
+ * meaning "return to no-dictionary mode".
+ * Note 1 : Dictionary is sticky, it will be used for all future compressed frames.
+ * To return to "no-dictionary" situation, load a NULL dictionary (or reset parameters).
+ * Note 2 : Loading a dictionary involves building tables.
+ * It's also a CPU consuming operation, with non-negligible impact on latency.
+ * Tables are dependent on compression parameters, and for this reason,
+ * compression parameters can no longer be changed after loading a dictionary.
+ * Note 3 :`dict` content will be copied internally.
+ * Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead.
+ * In such a case, dictionary buffer must outlive its users.
+ * Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()
+ * to precisely select how dictionary content must be interpreted. */
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
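+
+/* Illustrative sketch : sticky dictionary with the advanced API. Parameters
+ * are set before loading, since they can no longer change afterwards (Note 2).
+ *
+ *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3);
+ *   ZSTD_CCtx_loadDictionary(cctx, dictBuf, dictLen);
+ *   cSize1 = ZSTD_compress2(cctx, dst1, cap1, src1, srcSize1);
+ *   cSize2 = ZSTD_compress2(cctx, dst2, cap2, src2, srcSize2);  // same dict, no reload
+ *   ZSTD_CCtx_loadDictionary(cctx, NULL, 0);  // back to no-dictionary mode
+ */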
+
+/*! ZSTD_CCtx_refCDict() :
+ * Reference a prepared dictionary, to be used for all next compressed frames.
+ * Note that compression parameters are enforced from within CDict,
+ * and supersede any compression parameter previously set within CCtx.
+ * The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs.
+ * The ignored parameters will be used again if the CCtx is returned to no-dictionary mode.
+ * The dictionary will remain valid for future compressed frames using same CCtx.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Special : Referencing a NULL CDict means "return to no-dictionary mode".
+ * Note 1 : Currently, only one dictionary can be managed.
+ * Referencing a new dictionary effectively "discards" any previous one.
+ * Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx. */
+ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
+
+/*! ZSTD_CCtx_refPrefix() :
+ * Reference a prefix (single-usage dictionary) for next compressed frame.
+ * A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end).
+ * Decompression will need same prefix to properly regenerate data.
+ * Compressing with a prefix is similar in outcome to performing a diff and compressing it,
+ * but performs much faster, especially during decompression (compression speed is tunable with compression level).
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary.
+ * Note 1 : Prefix buffer is referenced. It **must** outlive compression.
+ * Its content must remain unmodified during compression.
+ * Note 2 : If the intention is to diff some large src data blob with some prior version of itself,
+ * ensure that the window size is large enough to contain the entire source.
+ * See ZSTD_c_windowLog.
+ * Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters.
+ * It's a CPU consuming operation, with non-negligible impact on latency.
+ * If there is a need to use the same prefix multiple times, consider loadDictionary instead.
+ * Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent).
+ * Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation. */
+ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx,
+ const void* prefix, size_t prefixSize);
+
+/*! ZSTD_DCtx_loadDictionary() :
+ * Create an internal DDict from dict buffer,
+ * to be used to decompress next frames.
+ * The dictionary remains valid for all future frames, until explicitly invalidated.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,
+ * meaning "return to no-dictionary mode".
+ * Note 1 : Loading a dictionary involves building tables,
+ * which has a non-negligible impact on CPU usage and latency.
+ * It's recommended to "load once, use many times", to amortize the cost.
+ * Note 2 :`dict` content will be copied internally, so `dict` can be released after loading.
+ * Use ZSTD_DCtx_loadDictionary_byReference() to reference dictionary content instead.
+ * Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to take control of
+ * how dictionary content is loaded and interpreted.
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
+
+/*! ZSTD_DCtx_refDDict() :
+ * Reference a prepared dictionary, to be used to decompress next frames.
+ * The dictionary remains active for decompression of future frames using same DCtx.
+ *
+ * If called with ZSTD_d_refMultipleDDicts enabled, repeated calls of this function
+ * will store the DDict references in a table, and the DDict used for decompression
+ * will be determined at decompression time, as per the dict ID in the frame.
+ * The memory for the table is allocated on the first call to refDDict, and can be
+ * freed with ZSTD_freeDCtx().
+ *
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Note 1 : Currently, only one dictionary can be managed.
+ * Referencing a new dictionary effectively "discards" any previous one.
+ * Special: referencing a NULL DDict means "return to no-dictionary mode".
+ * Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx.
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
+
+/*! ZSTD_DCtx_refPrefix() :
+ * Reference a prefix (single-usage dictionary) to decompress next frame.
+ * This is the reverse operation of ZSTD_CCtx_refPrefix(),
+ * and must use the same prefix as the one used during compression.
+ * Prefix is **only used once**. Reference is discarded at end of frame.
+ * End of frame is reached when ZSTD_decompressStream() returns 0.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary.
+ * Note 2 : Prefix buffer is referenced. It **must** outlive decompression.
+ * Prefix buffer must remain unmodified up to the end of frame,
+ * reached when ZSTD_decompressStream() returns 0.
+ * Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent).
+ * Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section)
+ * Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost.
+ * A full dictionary is more costly, as it requires building tables.
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx,
+ const void* prefix, size_t prefixSize);
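+
+/* Illustrative sketch : diff-like roundtrip against a prior version of a blob.
+ * oldVersion/newVersion are hypothetical buffers; both must outlive the calls,
+ * and a prefix must be re-referenced before each frame (single-use).
+ *
+ *   ZSTD_CCtx_refPrefix(cctx, oldVersion, oldSize);
+ *   size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, newVersion, newSize);
+ *
+ *   ZSTD_DCtx_refPrefix(dctx, oldVersion, oldSize);
+ *   size_t const dSize = ZSTD_decompressDCtx(dctx, out, outCapacity, dst, cSize);
+ */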
+
+/* === Memory management === */
+
+/*! ZSTD_sizeof_*() :
+ * These functions give the _current_ memory usage of selected object.
+ * Note that object memory usage can evolve (increase or decrease) over time. */
+ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
+ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
+ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
+ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
+ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
+ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
+
+#endif /* ZSTD_H_235446 */
+
+
+/* **************************************************************************************
+ * ADVANCED AND EXPERIMENTAL FUNCTIONS
+ ****************************************************************************************
+ * The definitions in the following section are considered experimental.
+ * They are provided for advanced scenarios.
+ * They should never be used with a dynamic library, as prototypes may change in the future.
+ * Use them only in association with static linking.
+ * ***************************************************************************************/
+
+#if !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
+#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
+
+/* **************************************************************************************
+ * experimental API (static linking only)
+ ****************************************************************************************
+ * The following symbols and constants
+ * are not planned to join "stable API" status in the near future.
+ * They can still change in future versions.
+ * Some of them are planned to remain in the static_only section indefinitely.
+ * Some of them might be removed in the future (especially when redundant with existing stable functions)
+ * ***************************************************************************************/
+
+#define ZSTD_FRAMEHEADERSIZE_PREFIX(format) ((format) == ZSTD_f_zstd1 ? 5 : 1) /* minimum input size required to query frame header size */
+#define ZSTD_FRAMEHEADERSIZE_MIN(format) ((format) == ZSTD_f_zstd1 ? 6 : 2)
+#define ZSTD_FRAMEHEADERSIZE_MAX 18 /* can be useful for static allocation */
+#define ZSTD_SKIPPABLEHEADERSIZE 8
+
+/* compression parameter bounds */
+#define ZSTD_WINDOWLOG_MAX_32 30
+#define ZSTD_WINDOWLOG_MAX_64 31
+#define ZSTD_WINDOWLOG_MAX ((int)(sizeof(size_t) == 4 ? ZSTD_WINDOWLOG_MAX_32 : ZSTD_WINDOWLOG_MAX_64))
+#define ZSTD_WINDOWLOG_MIN 10
+#define ZSTD_HASHLOG_MAX ((ZSTD_WINDOWLOG_MAX < 30) ? ZSTD_WINDOWLOG_MAX : 30)
+#define ZSTD_HASHLOG_MIN 6
+#define ZSTD_CHAINLOG_MAX_32 29
+#define ZSTD_CHAINLOG_MAX_64 30
+#define ZSTD_CHAINLOG_MAX ((int)(sizeof(size_t) == 4 ? ZSTD_CHAINLOG_MAX_32 : ZSTD_CHAINLOG_MAX_64))
+#define ZSTD_CHAINLOG_MIN ZSTD_HASHLOG_MIN
+#define ZSTD_SEARCHLOG_MAX (ZSTD_WINDOWLOG_MAX-1)
+#define ZSTD_SEARCHLOG_MIN 1
+#define ZSTD_MINMATCH_MAX 7 /* only for ZSTD_fast, other strategies are limited to 6 */
+#define ZSTD_MINMATCH_MIN 3 /* only for ZSTD_btopt+, faster strategies are limited to 4 */
+#define ZSTD_TARGETLENGTH_MAX ZSTD_BLOCKSIZE_MAX
+#define ZSTD_TARGETLENGTH_MIN 0 /* note : comparing this constant to an unsigned results in a tautological test */
+#define ZSTD_STRATEGY_MIN ZSTD_fast
+#define ZSTD_STRATEGY_MAX ZSTD_btultra2
+
+
+#define ZSTD_OVERLAPLOG_MIN 0
+#define ZSTD_OVERLAPLOG_MAX 9
+
+#define ZSTD_WINDOWLOG_LIMIT_DEFAULT 27 /* by default, the streaming decoder will refuse any frame
+ * requiring a window size larger than (1<<ZSTD_WINDOWLOG_LIMIT_DEFAULT),
+ * to protect the host's memory from unreasonable requirements.
+ * This limit can be overridden using ZSTD_DCtx_setParameter(,ZSTD_d_windowLogMax,).
+ * The limit does not apply for one-pass decoders (such as ZSTD_decompress()), since no additional memory is allocated */
+
+
+/* LDM parameter bounds */
+#define ZSTD_LDM_HASHLOG_MIN ZSTD_HASHLOG_MIN
+#define ZSTD_LDM_HASHLOG_MAX ZSTD_HASHLOG_MAX
+#define ZSTD_LDM_MINMATCH_MIN 4
+#define ZSTD_LDM_MINMATCH_MAX 4096
+#define ZSTD_LDM_BUCKETSIZELOG_MIN 1
+#define ZSTD_LDM_BUCKETSIZELOG_MAX 8
+#define ZSTD_LDM_HASHRATELOG_MIN 0
+#define ZSTD_LDM_HASHRATELOG_MAX (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)
+
+/* Advanced parameter bounds */
+#define ZSTD_TARGETCBLOCKSIZE_MIN 64
+#define ZSTD_TARGETCBLOCKSIZE_MAX ZSTD_BLOCKSIZE_MAX
+#define ZSTD_SRCSIZEHINT_MIN 0
+#define ZSTD_SRCSIZEHINT_MAX INT_MAX
+
+/* internal */
+#define ZSTD_HASHLOG3_MAX 17
+
+
+/* --- Advanced types --- */
+
+typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params;
+
+typedef struct {
+ unsigned int offset; /* The offset of the match. (NOT the same as the offset code)
+ * If offset == 0 and matchLength == 0, this sequence represents the last
+ * literals in the block of litLength size.
+ */
+
+ unsigned int litLength; /* Literal length of the sequence. */
+ unsigned int matchLength; /* Match length of the sequence. */
+
+ /* Note: Users of this API may provide a sequence with matchLength == litLength == offset == 0.
+ * In this case, we will treat the sequence as a marker for a block boundary.
+ */
+
+ unsigned int rep; /* Represents which repeat offset is represented by the field 'offset'.
+ * Ranges from [0, 3].
+ *
+ * Repeat offsets are essentially previous offsets from previous sequences sorted in
+ * recency order. For more detail, see doc/zstd_compression_format.md
+ *
+ * If rep == 0, then 'offset' does not contain a repeat offset.
+ * If rep > 0:
+ * If litLength != 0:
+ * rep == 1 --> offset == repeat_offset_1
+ * rep == 2 --> offset == repeat_offset_2
+ * rep == 3 --> offset == repeat_offset_3
+ * If litLength == 0:
+ * rep == 1 --> offset == repeat_offset_2
+ * rep == 2 --> offset == repeat_offset_3
+ * rep == 3 --> offset == repeat_offset_1 - 1
+ *
+ * Note: This field is optional. ZSTD_generateSequences() will calculate the value of
+ * 'rep', but repeat offsets do not necessarily need to be calculated from an external
+ * sequence provider's perspective. For example, ZSTD_compressSequences() does not
+ * use this 'rep' field at all (as of now).
+ */
+} ZSTD_Sequence;
+
+typedef struct {
+ unsigned windowLog; /*< largest match distance : larger == more compression, more memory needed during decompression */
+ unsigned chainLog; /*< fully searched segment : larger == more compression, slower, more memory (useless for fast) */
+ unsigned hashLog; /*< dispatch table : larger == faster, more memory */
+ unsigned searchLog; /*< nb of searches : larger == more compression, slower */
+ unsigned minMatch; /*< match length searched : larger == faster decompression, sometimes less compression */
+ unsigned targetLength; /*< acceptable match size for optimal parser (only) : larger == more compression, slower */
+ ZSTD_strategy strategy; /*< see ZSTD_strategy definition above */
+} ZSTD_compressionParameters;
+
+typedef struct {
+ int contentSizeFlag; /*< 1: content size will be in frame header (when known) */
+ int checksumFlag; /*< 1: generate a 32-bits checksum using XXH64 algorithm at end of frame, for error detection */
+ int noDictIDFlag; /*< 1: no dictID will be saved into frame header (dictID is only useful for dictionary compression) */
+} ZSTD_frameParameters;
+
+typedef struct {
+ ZSTD_compressionParameters cParams;
+ ZSTD_frameParameters fParams;
+} ZSTD_parameters;
+
+typedef enum {
+ ZSTD_dct_auto = 0, /* dictionary is "full" when starting with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent" */
+ ZSTD_dct_rawContent = 1, /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */
+ ZSTD_dct_fullDict = 2 /* refuses to load a dictionary if it does not respect Zstandard's specification, starting with ZSTD_MAGIC_DICTIONARY */
+} ZSTD_dictContentType_e;
+
+typedef enum {
+ ZSTD_dlm_byCopy = 0, /*< Copy dictionary content internally */
+ ZSTD_dlm_byRef = 1 /*< Reference dictionary content -- the dictionary buffer must outlive its users. */
+} ZSTD_dictLoadMethod_e;
+
+typedef enum {
+ ZSTD_f_zstd1 = 0, /* zstd frame format, specified in zstd_compression_format.md (default) */
+ ZSTD_f_zstd1_magicless = 1 /* Variant of zstd frame format, without initial 4-bytes magic number.
+ * Useful to save 4 bytes per generated frame.
+ * Decoder cannot automatically recognise this format, and requires this instruction. */
+} ZSTD_format_e;
+
+typedef enum {
+ /* Note: this enum controls ZSTD_d_forceIgnoreChecksum */
+ ZSTD_d_validateChecksum = 0,
+ ZSTD_d_ignoreChecksum = 1
+} ZSTD_forceIgnoreChecksum_e;
+
+typedef enum {
+ /* Note: this enum controls ZSTD_d_refMultipleDDicts */
+ ZSTD_rmd_refSingleDDict = 0,
+ ZSTD_rmd_refMultipleDDicts = 1
+} ZSTD_refMultipleDDicts_e;
+
+typedef enum {
+ /* Note: this enum and the behavior it controls are effectively internal
+ * implementation details of the compressor. They are expected to continue
+ * to evolve and should be considered only in the context of extremely
+ * advanced performance tuning.
+ *
+ * Zstd currently supports the use of a CDict in three ways:
+ *
+ * - The contents of the CDict can be copied into the working context. This
+ * means that the compression can search both the dictionary and input
+ * while operating on a single set of internal tables. This makes
+ * the compression faster per-byte of input. However, the initial copy of
+ * the CDict's tables incurs a fixed cost at the beginning of the
+ * compression. For small compressions (< 8 KB), that copy can dominate
+ * the cost of the compression.
+ *
+ * - The CDict's tables can be used in-place. In this model, compression is
+ * slower per input byte, because the compressor has to search two sets of
+ * tables. However, this model incurs no start-up cost (as long as the
+ * working context's tables can be reused). For small inputs, this can be
+ * faster than copying the CDict's tables.
+ *
+ * - The CDict's tables are not used at all, and instead we use the working
+ * context alone to reload the dictionary and use params based on the source
+ * size. See ZSTD_compress_insertDictionary() and ZSTD_compress_usingDict().
+ * This method is effective when the dictionary sizes are very small relative
+ * to the input size, and the input size is fairly large to begin with.
+ *
+ * Zstd has a simple internal heuristic that selects which strategy to use
+ * at the beginning of a compression. However, if experimentation shows that
+ * Zstd is making poor choices, it is possible to override that choice with
+ * this enum.
+ */
+ ZSTD_dictDefaultAttach = 0, /* Use the default heuristic. */
+ ZSTD_dictForceAttach = 1, /* Never copy the dictionary. */
+ ZSTD_dictForceCopy = 2, /* Always copy the dictionary. */
+ ZSTD_dictForceLoad = 3 /* Always reload the dictionary */
+} ZSTD_dictAttachPref_e;
+
+typedef enum {
+ ZSTD_lcm_auto = 0, /*< Automatically determine the compression mode based on the compression level.
+ * Literals are left uncompressed at negative compression
+ * levels, and compressed at positive levels. */
+ ZSTD_lcm_huffman = 1, /*< Always attempt Huffman compression. Uncompressed literals will still be
+ * emitted if Huffman compression is not profitable. */
+ ZSTD_lcm_uncompressed = 2 /*< Always emit uncompressed literals. */
+} ZSTD_literalCompressionMode_e;
+
+
+/* *************************************
+* Frame size functions
+***************************************/
+
+/*! ZSTD_findDecompressedSize() :
+ * `src` should point to the start of a series of ZSTD encoded and/or skippable frames
+ * `srcSize` must be the _exact_ size of this series
+ * (i.e. there should be a frame boundary at `src + srcSize`)
+ * @return : - decompressed size of all data in all successive frames
+ * - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN
+ * - if an error occurred: ZSTD_CONTENTSIZE_ERROR
+ *
+ * note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.
+ * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * note 2 : decompressed size is always present when compression is done with ZSTD_compress()
+ * note 3 : decompressed size can be very large (64-bits value),
+ * potentially larger than what local system can handle as a single memory segment.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
+ * Always ensure result fits within application's authorized limits.
+ * Each application can set its own limits.
+ * note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to
+ * read each contained frame header. This is fast, as most of the data is skipped;
+ * however, it does mean that all frame data must be present and valid. */
+ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
+
+/*! ZSTD_decompressBound() :
+ * `src` should point to the start of a series of ZSTD encoded and/or skippable frames
+ * `srcSize` must be the _exact_ size of this series
+ * (i.e. there should be a frame boundary at `src + srcSize`)
+ * @return : - upper-bound for the decompressed size of all data in all successive frames
+ * - if an error occurred: ZSTD_CONTENTSIZE_ERROR
+ *
+ * note 1 : an error can occur if `src` contains an invalid or incorrectly formatted frame.
+ * note 2 : the upper-bound is exact when the decompressed size field is available in every ZSTD encoded frame of `src`.
+ * in this case, `ZSTD_findDecompressedSize` and `ZSTD_decompressBound` return the same value.
+ * note 3 : when the decompressed size field isn't available, the upper-bound for that frame is calculated by:
+ * upper-bound = # blocks * min(128 KB, Window_Size)
+ */
+ZSTDLIB_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize);
+
+/*! ZSTD_frameHeaderSize() :
+ * srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX(ZSTD_f_zstd1).
+ * @return : size of the Frame Header,
+ * or an error code (if srcSize is too small) */
+ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
+
+typedef enum {
+ ZSTD_sf_noBlockDelimiters = 0, /* Representation of ZSTD_Sequence has no block delimiters, sequences only */
+ ZSTD_sf_explicitBlockDelimiters = 1 /* Representation of ZSTD_Sequence contains explicit block delimiters */
+} ZSTD_sequenceFormat_e;
+
+/*! ZSTD_generateSequences() :
+ * Generate sequences using ZSTD_compress2, given a source buffer.
+ *
+ * Each block will end with a dummy sequence
+ * with offset == 0, matchLength == 0, and litLength == length of last literals.
+ * litLength may be == 0, and if so, then the sequence (of: 0, ml: 0, ll: 0)
+ * simply acts as a block delimiter.
+ *
+ * zc can be used to insert custom compression params.
+ * This function invokes ZSTD_compress2().
+ *
+ * The output of this function can be fed into ZSTD_compressSequences() with CCtx
+ * setting of ZSTD_c_blockDelimiters as ZSTD_sf_explicitBlockDelimiters
+ * @return : number of sequences generated
+ */
+
+ZSTDLIB_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
+ size_t outSeqsSize, const void* src, size_t srcSize);
+
+/*! ZSTD_mergeBlockDelimiters() :
+ * Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals
+ * by merging them into the literals of the next sequence.
+ *
+ * As such, the final generated result has no explicit representation of block boundaries,
+ * and the final last literals segment is not represented in the sequences.
+ *
+ * The output of this function can be fed into ZSTD_compressSequences() with CCtx
+ * setting of ZSTD_c_blockDelimiters as ZSTD_sf_noBlockDelimiters
+ * @return : number of sequences left after merging
+ */
+ZSTDLIB_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize);
+
+/*! ZSTD_compressSequences() :
+ * Compress an array of ZSTD_Sequence, generated from the original source buffer, into dst.
+ * If a dictionary is included, then the cctx should reference the dict. (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.)
+ * The entire source is compressed into a single frame.
+ *
+ * The compression behavior changes based on cctx params. In particular:
+ * If ZSTD_c_blockDelimiters == ZSTD_sf_noBlockDelimiters, the array of ZSTD_Sequence is expected to contain
+ * no block delimiters (defined in ZSTD_Sequence). Block boundaries are roughly determined based on
+ * the block size derived from the cctx, and sequences may be split. This is the default setting.
+ *
+ * If ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, the array of ZSTD_Sequence is expected to contain
+ * block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided.
+ *
+ * If ZSTD_c_validateSequences == 0, this function will blindly accept the sequences provided. Invalid sequences cause undefined
+ * behavior. If ZSTD_c_validateSequences == 1 and a sequence is invalid (see doc/zstd_compression_format.md for
+ * specifics regarding offset/matchlength requirements), then the function will bail out and return an error.
+ *
+ * In addition to the two adjustable experimental params, there are other important cctx params.
+ * - ZSTD_c_minMatch MUST be set as less than or equal to the smallest match generated by the match finder. It has a minimum value of ZSTD_MINMATCH_MIN.
+ * - ZSTD_c_compressionLevel accordingly adjusts the strength of the entropy coder, as it would in typical compression.
+ * - ZSTD_c_windowLog affects offset validation: this function will return an error at higher debug levels if a provided offset
+ * is larger than what the spec allows for a given window log and dictionary (if present). See: doc/zstd_compression_format.md
+ *
+ * Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence::rep is unused.
+ * Note 2: Once we integrate ability to ingest repcodes, the explicit block delims mode must respect those repcodes exactly,
+ * and cannot emit an RLE block that disagrees with the repcode history.
+ * @return : final compressed size or a ZSTD error.
+ */
+ZSTDLIB_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstSize,
+ const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
+ const void* src, size_t srcSize);
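+
+/* Illustrative sketch : a sequence-level pipeline, without block delimiters.
+ * seqs/seqsCapacity are hypothetical; sizing the array is the caller's job.
+ *
+ *   size_t nbSeqs = ZSTD_generateSequences(zc, seqs, seqsCapacity, src, srcSize);
+ *   nbSeqs = ZSTD_mergeBlockDelimiters(seqs, nbSeqs);
+ *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_noBlockDelimiters);
+ *   size_t const cSize = ZSTD_compressSequences(cctx, dst, dstCapacity,
+ *                                               seqs, nbSeqs, src, srcSize);
+ */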
+
+
+/*! ZSTD_writeSkippableFrame() :
+ * Generates a zstd skippable frame containing data given by src, and writes it to dst buffer.
+ *
+ * Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number,
+ * ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15.
+ * As such, the parameter magicVariant controls the exact skippable frame magic number variant used, so
+ * the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant.
+ *
+ * Returns an error if destination buffer is not large enough, if the source size is not representable
+ * with a 4-byte unsigned int, or if the parameter magicVariant is greater than 15 (and therefore invalid).
+ *
+ * @return : number of bytes written or a ZSTD error.
+ */
+ZSTDLIB_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize, unsigned magicVariant);
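+
+/* Illustrative sketch : embed application metadata between compressed frames.
+ * Conformant decoders skip the frame; metadata/metaSize are placeholders.
+ *
+ *   size_t const written = ZSTD_writeSkippableFrame(dst, dstCapacity,
+ *                                                   metadata, metaSize, 0);
+ *   // the frame magic will be ZSTD_MAGIC_SKIPPABLE_START + 0
+ */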
+
+
+/* *************************************
+* Memory management
+***************************************/
+
+/*! ZSTD_estimate*() :
+ * These functions make it possible to estimate memory usage
+ * of a future {D,C}Ctx, before its creation.
+ *
+ * ZSTD_estimateCCtxSize() will provide a memory budget large enough
+ * for any compression level up to the selected one.
+ * Note : Unlike ZSTD_estimateCStreamSize*(), this estimate
+ * does not include space for a window buffer.
+ * Therefore, the estimation is only guaranteed for single-shot compressions, not streaming.
+ * The estimate will assume the input may be arbitrarily large,
+ * which is the worst case.
+ *
+ * When srcSize can be bound by a known and rather "small" value,
+ * this fact can be used to provide a tighter estimation
+ * because the CCtx compression context will need less memory.
+ * This tighter estimation can be provided by more advanced functions
+ * ZSTD_estimateCCtxSize_usingCParams(), which can be used in tandem with ZSTD_getCParams(),
+ * and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter().
+ * Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits.
+ *
+ * Note 2 : only single-threaded compression is supported.
+ * ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.
+ */
+ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
+ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
+ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
+ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void);
+
+/*! ZSTD_estimateCStreamSize() :
+ * ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to the selected one.
+ * It will also consider src size to be arbitrarily "large", which is the worst case.
+ * If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.
+ * ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
+ * ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter().
+ * Only single-threaded compression is supported; it will return an error code if ZSTD_c_nbWorkers is >= 1.
+ * Note : CStream size estimation is only correct for single-threaded compression.
+ * ZSTD_DStream memory budget depends on window Size.
+ * This information can be passed manually, using ZSTD_estimateDStreamSize,
+ * or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame();
+ * Note : if streaming is initialized with ZSTD_init?Stream_usingDict(),
+ * an internal ?Dict will be created, whose additional size is not estimated here.
+ * In this case, get the total size by adding ZSTD_estimate?DictSize */
+ZSTDLIB_API size_t ZSTD_estimateCStreamSize(int compressionLevel);
+ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
+ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
+ZSTDLIB_API size_t ZSTD_estimateDStreamSize(size_t windowSize);
+ZSTDLIB_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);
+
+/*! ZSTD_estimate?DictSize() :
+ * ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict().
+ * ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced().
+ * Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller.
+ */
+ZSTDLIB_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod);
+ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod);
+
+/*! ZSTD_initStatic*() :
+ * Initialize an object using a pre-allocated fixed-size buffer.
+ * workspace: The memory area to emplace the object into.
+ * Provided pointer *must be 8-bytes aligned*.
+ * Buffer must outlive object.
+ * workspaceSize: Use ZSTD_estimate*Size() to determine
+ * how large workspace must be to support target scenario.
+ * @return : pointer to object (same address as workspace, just different type),
+ * or NULL if error (size too small, incorrect alignment, etc.)
+ * Note : zstd will never resize nor malloc() when using a static buffer.
+ * If the object requires more memory than available,
+ * zstd will just error out (typically ZSTD_error_memory_allocation).
+ * Note 2 : there is no corresponding "free" function.
+ * Since workspace is allocated externally, it must be freed externally too.
+ * Note 3 : cParams : use ZSTD_getCParams() to convert a compression level
+ * into its associated cParams.
+ * Limitation 1 : currently not compatible with internal dictionary creation, triggered by
+ * ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict().
+ * Limitation 2 : static cctx currently not compatible with multi-threading.
+ * Limitation 3 : static dctx is incompatible with legacy support.
+ */
+ZSTDLIB_API ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize);
+ZSTDLIB_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize); /*< same as ZSTD_initStaticCCtx() */
+
+ZSTDLIB_API ZSTD_DCtx* ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize);
+ZSTDLIB_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize); /*< same as ZSTD_initStaticDCtx() */
+
+ZSTDLIB_API const ZSTD_CDict* ZSTD_initStaticCDict(
+ void* workspace, size_t workspaceSize,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_compressionParameters cParams);
+
+ZSTDLIB_API const ZSTD_DDict* ZSTD_initStaticDDict(
+ void* workspace, size_t workspaceSize,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType);
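+
+/* Illustrative sketch : place a CCtx into a caller-provided, 8-byte aligned
+ * buffer sized from the estimation API. `workspace` is a placeholder.
+ *
+ *   size_t const budget = ZSTD_estimateCCtxSize(3);
+ *   ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(workspace, budget);
+ *   if (cctx == NULL) {
+ *       // workspace too small or misaligned
+ *   }
+ */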
+
+
+/*! Custom memory allocation :
+ * These prototypes make it possible to pass your own allocation/free functions.
+ * ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below.
+ * All allocation/free operations will be completed using these custom variants instead of regular <stdlib.h> ones.
+ */
+typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
+typedef void (*ZSTD_freeFunction) (void* opaque, void* address);
+typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
+static
+__attribute__((__unused__))
+ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL }; /*< this constant defers to stdlib's functions */
+
+ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
+ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
+ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
+ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
+
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_compressionParameters cParams,
+ ZSTD_customMem customMem);
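+
+/* Illustrative sketch : route zstd allocations through custom functions.
+ * my_malloc()/my_release() are hypothetical host-environment allocators.
+ *
+ *   static void* my_alloc(void* opaque, size_t size) { (void)opaque; return my_malloc(size); }
+ *   static void  my_free (void* opaque, void* addr)  { (void)opaque; my_release(addr); }
+ *
+ *   ZSTD_customMem const cmem = { my_alloc, my_free, NULL };
+ *   ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(cmem);
+ */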
+
+/* ! Thread pool :
+ * These prototypes make it possible to share a thread pool among multiple compression contexts.
+ * This can limit resources for applications with multiple threads where each one uses
+ * a threaded compression mode (via ZSTD_c_nbWorkers parameter).
+ * ZSTD_createThreadPool creates a new thread pool with a given number of threads.
+ * Note that such a pool must remain alive for as long as it is in use.
+ * ZSTD_CCtx_refThreadPool assigns a thread pool to a context (use NULL argument value
+ * to use an internal thread pool).
+ * ZSTD_freeThreadPool frees a thread pool, accepts NULL pointer.
+ */
+typedef struct POOL_ctx_s ZSTD_threadPool;
+ZSTDLIB_API ZSTD_threadPool* ZSTD_createThreadPool(size_t numThreads);
+ZSTDLIB_API void ZSTD_freeThreadPool (ZSTD_threadPool* pool); /* accept NULL pointer */
+ZSTDLIB_API size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool);
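+
+/* Illustrative sketch : one pool shared by two contexts. Assumes a build with
+ * multithreading support (ZSTD_c_nbWorkers >= 1); the pool must outlive both.
+ *
+ *   ZSTD_threadPool* const pool = ZSTD_createThreadPool(4);
+ *   ZSTD_CCtx_refThreadPool(cctx1, pool);
+ *   ZSTD_CCtx_refThreadPool(cctx2, pool);
+ *   // ... compress on both contexts ...
+ *   ZSTD_freeThreadPool(pool);
+ */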
+
+
+/*
+ * This API is temporary and is expected to change or disappear in the future!
+ */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced2(
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ const ZSTD_CCtx_params* cctxParams,
+ ZSTD_customMem customMem);
+
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_customMem customMem);
+
+
+/* *************************************
+* Advanced compression functions
+***************************************/
+
+/*! ZSTD_createCDict_byReference() :
+ * Create a digested dictionary for compression
+ * Dictionary content is just referenced, not duplicated.
+ * As a consequence, `dictBuffer` **must** outlive CDict,
+ * and its content must remain unmodified throughout the lifetime of CDict.
+ * note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
+
+/*! ZSTD_getDictID_fromCDict() :
+ * Provides the dictID of the dictionary loaded into `cdict`.
+ * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
+ * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict);
+
+/*! ZSTD_getCParams() :
+ * @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
+ * `estimatedSrcSize` value is optional, select 0 if not known */
+ZSTDLIB_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
+
+/*! ZSTD_getParams() :
+ * same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`.
+ * All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */
+ZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
+
+/*! ZSTD_checkCParams() :
+ * Ensure param values remain within authorized range.
+ * @return 0 on success, or an error code (can be checked with ZSTD_isError()) */
+ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
+
+/*! ZSTD_adjustCParams() :
+ * optimize params for a given `srcSize` and `dictSize`.
+ * `srcSize` can be unknown, in which case use ZSTD_CONTENTSIZE_UNKNOWN.
+ * `dictSize` must be `0` when there is no dictionary.
+ * cPar can be invalid : all parameters will be clamped within valid range in the @return struct.
+ * This function never fails (wide contract) */
+ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
+
+/*! ZSTD_compress_advanced() :
+ * Note : this function is now DEPRECATED.
+ * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters.
+ * This prototype will be marked as deprecated and generate a compilation warning on reaching v1.5.x */
+ZSTDLIB_API size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ ZSTD_parameters params);
+
+/*! ZSTD_compress_usingCDict_advanced() :
+ * Note : this function is now REDUNDANT.
+ * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters.
+ * This prototype will be marked as deprecated and generate a compilation warning in some future version */
+ZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict,
+ ZSTD_frameParameters fParams);
+
+
+/*! ZSTD_CCtx_loadDictionary_byReference() :
+ * Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx.
+ * It saves some memory, but also requires that `dict` outlives its usage within `cctx` */
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
+
+/*! ZSTD_CCtx_loadDictionary_advanced() :
+ * Same as ZSTD_CCtx_loadDictionary(), but gives finer control over
+ * how to load the dictionary (by copy ? by reference ?)
+ * and how to interpret it (automatic ? force raw mode ? full mode only ?) */
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
+
+/*! ZSTD_CCtx_refPrefix_advanced() :
+ * Same as ZSTD_CCtx_refPrefix(), but gives finer control over
+ * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */
+ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
+
+/* === experimental parameters === */
+/* these parameters can be used with ZSTD_setParameter()
+ * they are not guaranteed to remain supported in the future */
+
+/* Enables rsyncable mode,
+ * which makes compressed files more rsync friendly
+ * by adding periodic synchronization points to the compressed data.
+ * The target average block size is ZSTD_c_jobSize / 2.
+ * It's possible to modify the job size to increase or decrease
+ * the granularity of the synchronization point.
+ * Once the jobSize is smaller than the window size,
+ * it will result in compression ratio degradation.
+ * NOTE 1: rsyncable mode only works when multithreading is enabled.
+ * NOTE 2: rsyncable performs poorly in combination with long range mode,
+ * since it will decrease the effectiveness of synchronization points,
+ * though mileage may vary.
+ * NOTE 3: Rsyncable mode limits maximum compression speed to ~400 MB/s.
+ * If the selected compression level is already running significantly slower,
+ * the overall speed won't be significantly impacted.
+ */
+#define ZSTD_c_rsyncable ZSTD_c_experimentalParam1
+
+/* Select a compression format.
+ * The value must be of type ZSTD_format_e.
+ * See ZSTD_format_e enum definition for details */
+#define ZSTD_c_format ZSTD_c_experimentalParam2
+
+/* Force back-reference distances to remain < windowSize,
+ * even when referencing into Dictionary content (default:0) */
+#define ZSTD_c_forceMaxWindow ZSTD_c_experimentalParam3
+
+/* Controls whether the contents of a CDict
+ * are used in place, or copied into the working context.
+ * Accepts values from the ZSTD_dictAttachPref_e enum.
+ * See the comments on that enum for an explanation of the feature. */
+#define ZSTD_c_forceAttachDict ZSTD_c_experimentalParam4
+
+/* Controls how the literals are compressed (default is auto).
+ * The value must be of type ZSTD_literalCompressionMode_e.
+ * See ZSTD_literalCompressionMode_e enum definition for details.
+ */
+#define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5
+
+/* Tries to fit compressed block size to be around targetCBlockSize.
+ * No target when targetCBlockSize == 0.
+ * There is no guarantee on compressed block size (default:0) */
+#define ZSTD_c_targetCBlockSize ZSTD_c_experimentalParam6
+
+/* User's best guess of source size.
+ * Hint is not valid when srcSizeHint == 0.
+ * There is no guarantee that hint is close to actual source size,
+ * but compression ratio may regress significantly if guess considerably underestimates */
+#define ZSTD_c_srcSizeHint ZSTD_c_experimentalParam7
+
+/* Controls whether the new and experimental "dedicated dictionary search
+ * structure" can be used. This feature is still rough around the edges, be
+ * prepared for surprising behavior!
+ *
+ * How to use it:
+ *
+ * When using a CDict, whether to use this feature or not is controlled at
+ * CDict creation, and it must be set in the ZSTD_CCtx_params passed into that
+ * construction (via ZSTD_createCDict_advanced2()). A compression will then
+ * use the feature or not based on how the CDict was constructed; the value of
+ * this param, set in the CCtx, will have no effect.
+ *
+ * However, when a dictionary buffer is passed into a CCtx, such as via
+ * ZSTD_CCtx_loadDictionary(), this param can be set on the CCtx to control
+ * whether the CDict that is created internally can use the feature or not.
+ *
+ * What it does:
+ *
+ * Normally, the internal data structures of the CDict are analogous to what
+ * would be stored in a CCtx after compressing the contents of a dictionary.
+ * To an approximation, a compression using a dictionary can then use those
+ * data structures to simply continue what is effectively a streaming
+ * compression where the simulated compression of the dictionary left off.
+ * Which is to say, the search structures in the CDict are normally the same
+ * format as in the CCtx.
+ *
+ * It is possible to do better, since the CDict is not like a CCtx: the search
+ * structures are written once during CDict creation, and then are only read
+ * after that, while the search structures in the CCtx are both read and
+ * written as the compression goes along. This means we can choose a search
+ * structure for the dictionary that is read-optimized.
+ *
+ * This feature enables the use of that different structure.
+ *
+ * Note that some of the members of the ZSTD_compressionParameters struct have
+ * different semantics and constraints in the dedicated search structure. It is
+ * highly recommended that you simply set a compression level in the CCtxParams
+ * you pass into the CDict creation call, and avoid messing with the cParams
+ * directly.
+ *
+ * Effects:
+ *
+ * This will only have any effect when the selected ZSTD_strategy
+ * implementation supports this feature. Currently, that's limited to
+ * ZSTD_greedy, ZSTD_lazy, and ZSTD_lazy2.
+ *
+ * Note that this means that the CDict tables can no longer be copied into the
+ * CCtx, so the dict attachment mode ZSTD_dictForceCopy will no longer be
+ * usable. The dictionary can only be attached or reloaded.
+ *
+ * In general, you should expect compression to be faster (sometimes very much
+ * so) and CDict creation to be slightly slower. Eventually, we will probably
+ * make this mode the default.
+ */
+#define ZSTD_c_enableDedicatedDictSearch ZSTD_c_experimentalParam8
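+
+/* Illustrative sketch : enable the feature at CDict creation, per the rule
+ * above that the param must be set in the CCtxParams passed to construction.
+ *
+ *   ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
+ *   ZSTD_CCtxParams_init(params, 3);   // set a compression level, as recommended
+ *   ZSTD_CCtxParams_setParameter(params, ZSTD_c_enableDedicatedDictSearch, 1);
+ *   ZSTD_CDict* const cdict = ZSTD_createCDict_advanced2(dictBuf, dictLen,
+ *                                  ZSTD_dlm_byCopy, ZSTD_dct_auto,
+ *                                  params, ZSTD_defaultCMem);
+ *   ZSTD_freeCCtxParams(params);
+ */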
+
+/* ZSTD_c_stableInBuffer
+ * Experimental parameter.
+ * Default is 0 == disabled. Set to 1 to enable.
+ *
+ * Tells the compressor that the ZSTD_inBuffer will ALWAYS be the same
+ * between calls, except for the modifications that zstd makes to pos (the
+ * caller must not modify pos). This is checked by the compressor, and
+ * compression will fail if it ever changes. This means the only flush
+ * mode that makes sense is ZSTD_e_end, so zstd will error if ZSTD_e_end
+ * is not used. The data in the ZSTD_inBuffer in the range [src, src + pos)
+ * MUST not be modified during compression or you will get data corruption.
+ *
+ * When this flag is enabled zstd won't allocate an input window buffer,
+ * because the user guarantees it can reference the ZSTD_inBuffer until
+ * the frame is complete. But, it will still allocate an output buffer
+ * large enough to fit a block (see ZSTD_c_stableOutBuffer). This will also
+ * avoid the memcpy() from the input buffer to the input window buffer.
+ *
+ * NOTE: ZSTD_compressStream2() will error if ZSTD_e_end is not used.
+ * That means this flag cannot be used with ZSTD_compressStream().
+ *
+ * NOTE: So long as the ZSTD_inBuffer always points to valid memory, using
+ * this flag is ALWAYS memory safe, and will never access out-of-bounds
+ * memory. However, compression WILL fail if you violate the preconditions.
+ *
+ * WARNING: The data in the ZSTD_inBuffer in the range [src, src + pos) MUST
+ * not be modified during compression or you will get data corruption. This
+ * is because zstd needs to reference data in the ZSTD_inBuffer to find
+ * matches. Normally zstd maintains its own window buffer for this purpose,
+ * but passing this flag tells zstd to use the user provided buffer.
+ */
+#define ZSTD_c_stableInBuffer ZSTD_c_experimentalParam9
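+
+/* Illustrative sketch : single-shot streaming with a stable input buffer.
+ * Assumes dstCapacity is large enough for the whole frame.
+ *
+ *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_stableInBuffer, 1);
+ *   ZSTD_inBuffer input = { src, srcSize, 0 };       // must stay valid, unmodified
+ *   ZSTD_outBuffer output = { dst, dstCapacity, 0 };
+ *   size_t const remaining = ZSTD_compressStream2(cctx, &output, &input, ZSTD_e_end);
+ *   // remaining == 0 when the frame completed in this single call
+ */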
+
+/* ZSTD_c_stableOutBuffer
+ * Experimental parameter.
+ * Default is 0 == disabled. Set to 1 to enable.
+ *
+ * Tells the compressor that the ZSTD_outBuffer will not be resized between
+ * calls. Specifically: (out.size - out.pos) will never grow. This gives the
+ * compressor the freedom to say: If the compressed data doesn't fit in the
+ * output buffer then return ZSTD_error_dstSizeTooSmall. This allows us to
+ * always compress directly into the output buffer, instead of compressing
+ * into an internal buffer and copying to the output buffer.
+ *
+ * When this flag is enabled zstd won't allocate an output buffer, because
+ * it can write directly to the ZSTD_outBuffer. It will still allocate the
+ * input window buffer (see ZSTD_c_stableInBuffer).
+ *
+ * Zstd will check that (out.size - out.pos) never grows and return an error
+ * if it does. While not strictly necessary, this should prevent surprises.
+ */
+#define ZSTD_c_stableOutBuffer ZSTD_c_experimentalParam10
+
+/* ZSTD_c_blockDelimiters
+ * Default is 0 == ZSTD_sf_noBlockDelimiters.
+ *
+ * For use with sequence compression API: ZSTD_compressSequences().
+ *
+ * Designates whether or not the given array of ZSTD_Sequence contains block delimiters
+ * and last literals, which are defined as sequences with offset == 0 and matchLength == 0.
+ * See the definition of ZSTD_Sequence for more specifics.
+ */
+#define ZSTD_c_blockDelimiters ZSTD_c_experimentalParam11
+
+/* ZSTD_c_validateSequences
+ * Default is 0 == disabled. Set to 1 to enable sequence validation.
+ *
+ * For use with sequence compression API: ZSTD_compressSequences().
+ * Designates whether or not we validate sequences provided to ZSTD_compressSequences()
+ * during function execution.
+ *
+ * Without validation, providing a sequence that does not conform to the zstd spec will cause
+ * undefined behavior, and may produce a corrupted block.
+ *
+ * With validation enabled, if a sequence is invalid (see doc/zstd_compression_format.md for
+ * specifics regarding offset/matchlength requirements) then the function will bail out and
+ * return an error.
+ */
+#define ZSTD_c_validateSequences ZSTD_c_experimentalParam12
+
+/*! ZSTD_CCtx_getParameter() :
+ * Get the requested compression parameter value, selected by enum ZSTD_cParameter,
+ * and store it into int* value.
+ * @return : 0, or an error code (which can be tested with ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_getParameter(const ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value);
+
+
+/*! ZSTD_CCtx_params :
+ * Quick howto :
+ * - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure
+ * - ZSTD_CCtxParams_setParameter() : Push parameters one by one into
+ * an existing ZSTD_CCtx_params structure.
+ * This is similar to
+ * ZSTD_CCtx_setParameter().
+ * - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to
+ * an existing CCtx.
+ * These parameters will be applied to
+ * all subsequent frames.
+ * - ZSTD_compressStream2() : Do compression using the CCtx.
+ * - ZSTD_freeCCtxParams() : Free the memory, accept NULL pointer.
+ *
+ * This can be used with ZSTD_estimateCCtxSize_advanced_usingCCtxParams()
+ * for static allocation of CCtx for single-threaded compression.
+ */
+ZSTDLIB_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void);
+ZSTDLIB_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params); /* accept NULL pointer */
+
+/*! ZSTD_CCtxParams_reset() :
+ * Reset params to default values.
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params);
+
+/*! ZSTD_CCtxParams_init() :
+ * Initializes the compression parameters of cctxParams according to
+ * compression level. All other parameters are reset to their default values.
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel);
+
+/*! ZSTD_CCtxParams_init_advanced() :
+ * Initializes the compression and frame parameters of cctxParams according to
+ * params. All other parameters are reset to their default values.
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);
+
+/*! ZSTD_CCtxParams_setParameter() :
+ * Similar to ZSTD_CCtx_setParameter.
+ * Set one compression parameter, selected by enum ZSTD_cParameter.
+ * Parameters must be applied to a ZSTD_CCtx using
+ * ZSTD_CCtx_setParametersUsingCCtxParams().
+ * @result : a code representing success or failure (which can be tested with
+ * ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value);
+
+/*! ZSTD_CCtxParams_getParameter() :
+ * Similar to ZSTD_CCtx_getParameter.
+ * Get the requested value of one compression parameter, selected by enum ZSTD_cParameter.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_getParameter(const ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value);
+
+/*! ZSTD_CCtx_setParametersUsingCCtxParams() :
+ * Apply a set of ZSTD_CCtx_params to the compression context.
+ *  This can be done even after compression is started.
+ *  If nbWorkers==0, this will have no impact until a new compression is started.
+ *  If nbWorkers>=1, new parameters will be picked up at next job,
+ * with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated).
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams(
+ ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params);
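+
+/* Hedged sketch of the "quick howto" above (compression level and checksum
+ * flag chosen arbitrarily for illustration) :
+ *
+ *   ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
+ *   ZSTD_CCtxParams_setParameter(params, ZSTD_c_compressionLevel, 3);
+ *   ZSTD_CCtxParams_setParameter(params, ZSTD_c_checksumFlag, 1);
+ *   ZSTD_CCtx_setParametersUsingCCtxParams(cctx, params);  // applies to all subsequent frames
+ *   // ... compress with ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end) ...
+ *   ZSTD_freeCCtxParams(params);
+ */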
+
+/*! ZSTD_compressStream2_simpleArgs() :
+ * Same as ZSTD_compressStream2(),
+ * but using only integral types as arguments.
+ * This variant might be helpful for binders from dynamic languages
+ * which have trouble handling structures containing memory pointers.
+ */
+ZSTDLIB_API size_t ZSTD_compressStream2_simpleArgs (
+ ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity, size_t* dstPos,
+ const void* src, size_t srcSize, size_t* srcPos,
+ ZSTD_EndDirective endOp);
+
+
+/* *************************************
+* Advanced decompression functions
+***************************************/
+
+/*! ZSTD_isFrame() :
+ * Tells if the content of `buffer` starts with a valid Frame Identifier.
+ * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
+ * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
+ * Note 3 : Skippable Frame Identifiers are considered valid. */
+ZSTDLIB_API unsigned ZSTD_isFrame(const void* buffer, size_t size);
+
+/*! ZSTD_createDDict_byReference() :
+ * Create a digested dictionary, ready to start decompression operation without startup delay.
+ * Dictionary content is referenced, and therefore stays in dictBuffer.
+ *  It is important that dictBuffer outlives DDict;
+ *  it must remain read accessible throughout the lifetime of DDict */
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
+
+/*! ZSTD_DCtx_loadDictionary_byReference() :
+ * Same as ZSTD_DCtx_loadDictionary(),
+ * but references `dict` content instead of copying it into `dctx`.
+ * This saves memory if `dict` remains around.
+ * However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression. */
+ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
+
+/*! ZSTD_DCtx_loadDictionary_advanced() :
+ * Same as ZSTD_DCtx_loadDictionary(),
+ * but gives direct control over
+ * how to load the dictionary (by copy ? by reference ?)
+ * and how to interpret it (automatic ? force raw mode ? full mode only ?). */
+ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
+
+/*! ZSTD_DCtx_refPrefix_advanced() :
+ * Same as ZSTD_DCtx_refPrefix(), but gives finer control over
+ * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */
+ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
+
+/*! ZSTD_DCtx_setMaxWindowSize() :
+ * Refuses allocating internal buffers for frames requiring a window size larger than the provided limit.
+ * This protects a decoder context from reserving too much memory for itself (potential attack scenario).
+ * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.
+ * By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT).
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize);
+
+/*! ZSTD_DCtx_getParameter() :
+ * Get the requested decompression parameter value, selected by enum ZSTD_dParameter,
+ * and store it into int* value.
+ * @return : 0, or an error code (which can be tested with ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value);
+
+/* ZSTD_d_format
+ * experimental parameter,
+ * allowing selection between ZSTD_format_e input compression formats
+ */
+#define ZSTD_d_format ZSTD_d_experimentalParam1
+/* ZSTD_d_stableOutBuffer
+ * Experimental parameter.
+ * Default is 0 == disabled. Set to 1 to enable.
+ *
+ * Tells the decompressor that the ZSTD_outBuffer will ALWAYS be the same
+ * between calls, except for the modifications that zstd makes to pos (the
+ * caller must not modify pos). This is checked by the decompressor, and
+ * decompression will fail if it ever changes. Therefore the ZSTD_outBuffer
+ * MUST be large enough to fit the entire decompressed frame. This will be
+ * checked when the frame content size is known. The data in the ZSTD_outBuffer
+ * in the range [dst, dst + pos) MUST not be modified during decompression
+ * or you will get data corruption.
+ *
+ * When this flag is enabled zstd won't allocate an output buffer, because
+ * it can write directly to the ZSTD_outBuffer, but it will still allocate
+ * an input buffer large enough to fit any compressed block. This will also
+ * avoid the memcpy() from the internal output buffer to the ZSTD_outBuffer.
+ * If you need to avoid the input buffer allocation use the buffer-less
+ * streaming API.
+ *
+ * NOTE: So long as the ZSTD_outBuffer always points to valid memory, using
+ * this flag is ALWAYS memory safe, and will never access out-of-bounds
+ * memory. However, decompression WILL fail if you violate the preconditions.
+ *
+ * WARNING: The data in the ZSTD_outBuffer in the range [dst, dst + pos) MUST
+ * not be modified during decompression or you will get data corruption. This
+ * is because zstd needs to reference data in the ZSTD_outBuffer to regenerate
+ * matches. Normally zstd maintains its own buffer for this purpose, but passing
+ * this flag tells zstd to use the user provided buffer.
+ */
+#define ZSTD_d_stableOutBuffer ZSTD_d_experimentalParam2
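+
+/* Hedged usage sketch (illustrative only; decompressedSize is a placeholder
+ * the caller must know) : one ZSTD_outBuffer large enough for the whole frame
+ * is committed up front, so zstd writes directly into it :
+ *
+ *   ZSTD_DCtx* const dctx = ZSTD_createDCtx();
+ *   ZSTD_DCtx_setParameter(dctx, ZSTD_d_stableOutBuffer, 1);
+ *   ZSTD_outBuffer out = { dst, decompressedSize, 0 };  // fixed for the whole frame
+ *   ZSTD_inBuffer in = { src, srcSize, 0 };
+ *   size_t ret;
+ *   while ((ret = ZSTD_decompressStream(dctx, &out, &in)) != 0 && !ZSTD_isError(ret)) { }
+ *   ZSTD_freeDCtx(dctx);
+ */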
+
+/* ZSTD_d_forceIgnoreChecksum
+ * Experimental parameter.
+ * Default is 0 == disabled. Set to 1 to enable.
+ *
+ * Tells the decompressor to skip checksum validation during decompression, regardless
+ * of whether checksumming was specified during compression. This offers some
+ * slight performance benefits, and may be useful for debugging.
+ * Param has values of type ZSTD_forceIgnoreChecksum_e
+ */
+#define ZSTD_d_forceIgnoreChecksum ZSTD_d_experimentalParam3
+
+/* ZSTD_d_refMultipleDDicts
+ * Experimental parameter.
+ * Default is 0 == disabled. Set to 1 to enable.
+ *
+ * If enabled and dctx is allocated on the heap, then additional memory will be allocated
+ * to store references to multiple ZSTD_DDict. That is, multiple calls of ZSTD_DCtx_refDDict()
+ * using a given ZSTD_DCtx, rather than overwriting the previous DDict reference, will instead
+ * store all references. At decompression time, the appropriate dictID is selected
+ * from the set of DDicts based on the dictID in the frame.
+ *
+ * Usage is simply calling ZSTD_DCtx_refDDict() on multiple dict buffers.
+ *
+ * Param has values of type ZSTD_refMultipleDDicts_e
+ *
+ * WARNING: Enabling this parameter and calling ZSTD_DCtx_refDDict() will trigger memory
+ * allocation for the hash table. ZSTD_freeDCtx() also frees this memory.
+ * Memory is allocated as per ZSTD_DCtx::customMem.
+ *
+ * Although this function allocates memory for the table, the user is still responsible for
+ * memory management of the underlying ZSTD_DDict* objects themselves.
+ */
+#define ZSTD_d_refMultipleDDicts ZSTD_d_experimentalParam4
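+
+/* Illustrative sketch, assuming the generic "1 == enabled" convention selects
+ * the multiple-DDicts setting of ZSTD_refMultipleDDicts_e : every referenced
+ * DDict is retained, and the frame's dictID selects the right one :
+ *
+ *   ZSTD_DCtx_setParameter(dctx, ZSTD_d_refMultipleDDicts, 1);
+ *   ZSTD_DCtx_refDDict(dctx, ddict1);
+ *   ZSTD_DCtx_refDDict(dctx, ddict2);   // both references are kept
+ */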
+
+
+/*! ZSTD_DCtx_setFormat() :
+ * Instruct the decoder context about what kind of data to decode next.
+ * This instruction is mandatory to decode data without a fully-formed header,
+ * such as ZSTD_f_zstd1_magicless for example.
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()). */
+ZSTDLIB_API size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
+
+/*! ZSTD_decompressStream_simpleArgs() :
+ * Same as ZSTD_decompressStream(),
+ * but using only integral types as arguments.
+ * This can be helpful for binders from dynamic languages
+ * which have trouble handling structures containing memory pointers.
+ */
+ZSTDLIB_API size_t ZSTD_decompressStream_simpleArgs (
+ ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity, size_t* dstPos,
+ const void* src, size_t srcSize, size_t* srcPos);
+
+
+/* ******************************************************************
+* Advanced streaming functions
+* Warning : most of these functions are now redundant with the Advanced API.
+* Once Advanced API reaches "stable" status,
+* redundant functions will be deprecated, and then at some point removed.
+********************************************************************/
+
+/*===== Advanced Streaming compression functions =====*/
+
+/*! ZSTD_initCStream_srcSize() :
+ * This function is deprecated, and equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
+ * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
+ * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ *
+ * pledgedSrcSize must be correct. If it is not known at init time, use
+ * ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs,
+ * "0" also disables frame content size field. It may be enabled in the future.
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_API size_t
+ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
+ int compressionLevel,
+ unsigned long long pledgedSrcSize);
+
+/*! ZSTD_initCStream_usingDict() :
+ * This function is deprecated, and is equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
+ * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
+ *
+ * Creates an internal CDict (incompatible with static CCtx), except if
+ * dict == NULL or dictSize < 8, in which case no dict is used.
+ * Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if
+ * it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_API size_t
+ZSTD_initCStream_usingDict(ZSTD_CStream* zcs,
+ const void* dict, size_t dictSize,
+ int compressionLevel);
+
+/*! ZSTD_initCStream_advanced() :
+ * This function is deprecated, and is approximately equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * // Pseudocode: Set each zstd parameter and leave the rest as-is.
+ * for ((param, value) : params) {
+ * ZSTD_CCtx_setParameter(zcs, param, value);
+ * }
+ * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
+ *
+ * dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy.
+ * pledgedSrcSize must be correct.
+ * If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_API size_t
+ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
+ const void* dict, size_t dictSize,
+ ZSTD_parameters params,
+ unsigned long long pledgedSrcSize);
+
+/*! ZSTD_initCStream_usingCDict() :
+ * This function is deprecated, and equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_refCDict(zcs, cdict);
+ *
+ * note : cdict will just be referenced, and must outlive the compression session
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
+
+/*! ZSTD_initCStream_usingCDict_advanced() :
+ * This function is DEPRECATED, and is approximately equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * // Pseudocode: Set each zstd frame parameter and leave the rest as-is.
+ * for ((fParam, value) : fParams) {
+ * ZSTD_CCtx_setParameter(zcs, fParam, value);
+ * }
+ * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ * ZSTD_CCtx_refCDict(zcs, cdict);
+ *
+ * same as ZSTD_initCStream_usingCDict(), with control over frame parameters.
+ * pledgedSrcSize must be correct. If srcSize is not known at init time, use
+ * value ZSTD_CONTENTSIZE_UNKNOWN.
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_API size_t
+ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
+ const ZSTD_CDict* cdict,
+ ZSTD_frameParameters fParams,
+ unsigned long long pledgedSrcSize);
+
+/*! ZSTD_resetCStream() :
+ * This function is deprecated, and is equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ *
+ * Start a new frame, using the same parameters as the previous frame.
+ * This is typically useful to skip the dictionary loading stage, since it will be re-used in-place.
+ * Note that zcs must be initialized at least once before using ZSTD_resetCStream().
+ * If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN.
+ * If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end.
+ * For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs,
+ * but it will change to mean "empty" in a future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.
+ * @return : 0, or an error code (which can be tested using ZSTD_isError())
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
+
+
+typedef struct {
+ unsigned long long ingested; /* nb input bytes read and buffered */
+ unsigned long long consumed; /* nb input bytes actually compressed */
+ unsigned long long produced; /* nb of compressed bytes generated and buffered */
+ unsigned long long flushed; /* nb of compressed bytes flushed : not provided; can be tracked from caller side */
+ unsigned currentJobID; /* MT only : latest started job nb */
+ unsigned nbActiveWorkers; /* MT only : nb of workers actively compressing at probe time */
+} ZSTD_frameProgression;
+
+/* ZSTD_getFrameProgression() :
+ *  tells how much data has been ingested (read from input),
+ *  consumed (input actually compressed) and produced (output) for the current frame.
+ * Note : (ingested - consumed) is amount of input data buffered internally, not yet compressed.
+ * Aggregates progression inside active worker threads.
+ */
+ZSTDLIB_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx);
+
+/*! ZSTD_toFlushNow() :
+ * Tell how many bytes are ready to be flushed immediately.
+ * Useful for multithreading scenarios (nbWorkers >= 1).
+ * Probe the oldest active job, defined as oldest job not yet entirely flushed,
+ * and check its output buffer.
+ * @return : amount of data stored in oldest job and ready to be flushed immediately.
+ * if @return == 0, it means either :
+ * + there is no active job (could be checked with ZSTD_frameProgression()), or
+ * + oldest job is still actively compressing data,
+ * but everything it has produced has also been flushed so far,
+ * therefore flush speed is limited by production speed of oldest job
+ * irrespective of the speed of concurrent (and newer) jobs.
+ */
+ZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
+
+
+/*===== Advanced Streaming decompression functions =====*/
+
+/*!
+ * This function is deprecated, and is equivalent to:
+ *
+ * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ * ZSTD_DCtx_loadDictionary(zds, dict, dictSize);
+ *
+ * note: no dictionary will be used if dict == NULL or dictSize < 8
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
+
+/*!
+ * This function is deprecated, and is equivalent to:
+ *
+ * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ * ZSTD_DCtx_refDDict(zds, ddict);
+ *
+ * note : ddict is referenced; it must outlive the decompression session
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
+
+/*!
+ * This function is deprecated, and is equivalent to:
+ *
+ * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ *
+ * re-use decompression parameters from previous init; saves dictionary loading
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
+
+
+/* *******************************************************************
+* Buffer-less and synchronous inner streaming functions
+*
+* This is an advanced API, giving full control over buffer management, for users who need direct control over memory.
+* But it's also a complex one, with several restrictions, documented below.
+* Prefer normal streaming API for an easier experience.
+********************************************************************* */
+
+/*
+ Buffer-less streaming compression (synchronous mode)
+
+ A ZSTD_CCtx object is required to track streaming operations.
+ Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource.
+  A ZSTD_CCtx object can be re-used multiple times within successive compression operations.
+
+ Start by initializing a context.
+ Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression,
+ or ZSTD_compressBegin_advanced(), for finer parameter control.
+  It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx().
+
+ Then, consume your input using ZSTD_compressContinue().
+ There are some important considerations to keep in mind when using this advanced function :
+ - ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only.
+ - Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks.
+ - Caller must ensure there is enough space in `dst` to store compressed data under worst case scenario.
+ Worst case evaluation is provided by ZSTD_compressBound().
+    ZSTD_compressContinue() doesn't guarantee recovery after a failed compression.
+ - ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to maximum distance size, see WindowLog).
+    It remembers all previous contiguous blocks, plus one separated memory segment (which can itself consist of multiple contiguous blocks)
+  - ZSTD_compressContinue() detects that prior input has been overwritten when the `src` buffer overlaps,
+    in which case it will "discard" the relevant memory section from its history.
+
+ Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.
+  It's possible to use srcSize==0, in which case it will write a final empty block to end the frame.
+  Without a last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.
+
+ `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.
+*/
+
+/*===== Buffer-less streaming compression functions =====*/
+ZSTDLIB_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /*< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */
+ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /*< note: fails if cdict==NULL */
+ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */
+ZSTDLIB_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /*< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
+
+ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
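+
+/* Hedged sketch of the buffer-less compression flow described above
+ * (chunk[]/nbChunks are caller-defined placeholders, dst is assumed char*,
+ * and dstCapacity comes from ZSTD_compressBound()) :
+ *
+ *   ZSTD_compressBegin(cctx, 3);
+ *   size_t pos = 0;
+ *   for (size_t i = 0; i < nbChunks; i++) {
+ *       size_t const csize = ZSTD_compressContinue(cctx, dst + pos, dstCapacity - pos,
+ *                                                  chunk[i].ptr, chunk[i].size);
+ *       if (ZSTD_isError(csize)) return csize;
+ *       pos += csize;
+ *   }
+ *   pos += ZSTD_compressEnd(cctx, dst + pos, dstCapacity - pos, NULL, 0);  // last block (+ checksum)
+ */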
+
+
+/*
+ Buffer-less streaming decompression (synchronous mode)
+
+ A ZSTD_DCtx object is required to track streaming operations.
+ Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
+ A ZSTD_DCtx object can be re-used multiple times.
+
+ First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().
+ Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.
+ Data fragment must be large enough to ensure successful decoding.
+  A fragment of `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
+ @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
+ >0 : `srcSize` is too small, please provide at least @result bytes on next attempt.
+           or an errorCode, which can be tested using ZSTD_isError().
+
+ It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
+ such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).
+ Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.
+ As a consequence, check that values remain within valid application range.
+ For example, do not allocate memory blindly, check that `windowSize` is within expectation.
+ Each application can set its own limits, depending on local restrictions.
+ For extended interoperability, it is recommended to support `windowSize` of at least 8 MB.
+
+ ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes.
+  ZSTD_decompressContinue() is very sensitive to contiguity:
+  if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,
+  or that the previous contiguous segment is large enough to properly handle the maximum back-reference distance.
+ There are multiple ways to guarantee this condition.
+
+ The most memory efficient way is to use a round buffer of sufficient size.
+ Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),
+  which can @return an error code if the required value is too large for the current system (in 32-bit mode).
+ In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one,
+ up to the moment there is not enough room left in the buffer to guarantee decoding another full block,
+  whose maximum size is provided in the `ZSTD_frameHeader` structure, field `blockSizeMax`.
+  At that point, decoding can resume from the beginning of the buffer.
+ Note that already decoded data stored in the buffer should be flushed before being overwritten.
+
+ There are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory.
+
+ Finally, if you control the compression process, you can also ignore all buffer size rules,
+ as long as the encoder and decoder progress in "lock-step",
+ aka use exactly the same buffer sizes, break contiguity at the same place, etc.
+
+  Once buffers are set up, start decompression with ZSTD_decompressBegin().
+ If decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().
+
+  Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternately.
+ ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
+ ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.
+
+ @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
+ It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.
+ It can also be an error code, which can be tested with ZSTD_isError().
+
+ A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
+ Context can then be reset to start a new decompression.
+
+ Note : it's possible to know if next input to present is a header or a block, using ZSTD_nextInputType().
+ This information is not required to properly decode a frame.
+
+ == Special case : skippable frames ==
+
+ Skippable frames allow integration of user-defined data into a flow of concatenated frames.
+  Skippable frames will be ignored (skipped) by the decompressor.
+ The format of skippable frames is as follows :
+ a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F
+ b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
+ c) Frame Content - any content (User Data) of length equal to Frame Size
+ For skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame.
+ For skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content.
+*/
+
+/*===== Buffer-less streaming decompression functions =====*/
+typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;
+typedef struct {
+ unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
+  unsigned long long windowSize;       /* can be very large, at most frameContentSize */
+ unsigned blockSizeMax;
+ ZSTD_frameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
+ unsigned headerSize;
+ unsigned dictID;
+ unsigned checksumFlag;
+} ZSTD_frameHeader;
+
+/*! ZSTD_getFrameHeader() :
+ * decode Frame Header, or requires larger `srcSize`.
+ * @return : 0, `zfhPtr` is correctly filled,
+ * >0, `srcSize` is too small, value is wanted `srcSize` amount,
+ * or an error code, which can be tested using ZSTD_isError() */
+ZSTDLIB_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /*< doesn't consume input */
+/*! ZSTD_getFrameHeader_advanced() :
+ * same as ZSTD_getFrameHeader(),
+ * with added capability to select a format (like ZSTD_f_zstd1_magicless) */
+ZSTDLIB_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
+ZSTDLIB_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); /*< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
+
+ZSTDLIB_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
+ZSTDLIB_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
+ZSTDLIB_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
+
+ZSTDLIB_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
+ZSTDLIB_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
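+
+/* Hedged sketch of the decompression loop described above; readMore() is a
+ * hypothetical helper returning exactly `need` contiguous input bytes, and
+ * dst is assumed char* :
+ *
+ *   ZSTD_decompressBegin(dctx);
+ *   size_t need;
+ *   while ((need = ZSTD_nextSrcSizeToDecompress(dctx)) != 0) {
+ *       const void* const src = readMore(need);
+ *       size_t const regen = ZSTD_decompressContinue(dctx, dst, dstCapacity, src, need);
+ *       if (ZSTD_isError(regen)) return regen;
+ *       dst += regen; dstCapacity -= regen;   // regen may be 0 for metadata items
+ *   }
+ */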
+
+/* misc */
+ZSTDLIB_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
+typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
+ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
+
+
+
+
+/* ============================ */
+/* Block level API */
+/* ============================ */
+
+/*!
+ Block functions produce and decode raw zstd blocks, without frame metadata.
+ Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes).
+  But users must then take charge of the metadata needed to regenerate the data, such as compressed and content sizes.
+
+ A few rules to respect :
+ - Compressing and decompressing require a context structure
+ + Use ZSTD_createCCtx() and ZSTD_createDCtx()
+  - It is necessary to initialize the context before starting
+ + compression : any ZSTD_compressBegin*() variant, including with dictionary
+ + decompression : any ZSTD_decompressBegin*() variant, including with dictionary
+ + copyCCtx() and copyDCtx() can be used too
+ - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
+ + If input is larger than a block size, it's necessary to split input data into multiple blocks
+ + For inputs larger than a single block, consider using regular ZSTD_compress() instead.
+ Frame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block.
+ - When a block is considered not compressible enough, ZSTD_compressBlock() result will be 0 (zero) !
+ ===> In which case, nothing is produced into `dst` !
+ + User __must__ test for such outcome and deal directly with uncompressed data
+ + A block cannot be declared incompressible if ZSTD_compressBlock() return value was != 0.
+      Doing so would mess up the statistics history, leading to potential data corruption.
+ + ZSTD_decompressBlock() _doesn't accept uncompressed data as input_ !!
+ + In case of multiple successive blocks, should some of them be uncompressed,
+      the decoder must be informed of their existence in order to follow proper history.
+ Use ZSTD_insertBlock() for such a case.
+*/
+
+/*===== Raw zstd block functions =====*/
+ZSTDLIB_API size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx);
+ZSTDLIB_API size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /*< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */
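+
+/* Hedged sketch of single-block usage, including the incompressible case the
+ * rules above insist on (buffer sizing is left to the caller) :
+ *
+ *   ZSTD_compressBegin(cctx, 3);
+ *   ZSTD_decompressBegin(dctx);
+ *   size_t const csize = ZSTD_compressBlock(cctx, dst, dstCapacity, src, srcSize);
+ *   if (ZSTD_isError(csize)) return csize;
+ *   if (csize == 0) {
+ *       // not compressible : transmit `src` verbatim, and call
+ *       // ZSTD_insertBlock(dctx, src, srcSize) on the decoding side
+ *       // so the decoder history stays consistent
+ *   } else {
+ *       size_t const rsize = ZSTD_decompressBlock(dctx, out, outCapacity, dst, csize);
+ *       if (ZSTD_isError(rsize)) return rsize;
+ *   }
+ */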
+
+
+#endif /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */
+
diff --git a/include/media/hevc-ctrls.h b/include/media/hevc-ctrls.h
index 781371bff2ad..ef63bc205756 100644
--- a/include/media/hevc-ctrls.h
+++ b/include/media/hevc-ctrls.h
@@ -19,6 +19,7 @@
#define V4L2_CID_MPEG_VIDEO_HEVC_SPS (V4L2_CID_CODEC_BASE + 1008)
#define V4L2_CID_MPEG_VIDEO_HEVC_PPS (V4L2_CID_CODEC_BASE + 1009)
#define V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS (V4L2_CID_CODEC_BASE + 1010)
+#define V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX (V4L2_CID_CODEC_BASE + 1011)
#define V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS (V4L2_CID_CODEC_BASE + 1012)
#define V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE (V4L2_CID_CODEC_BASE + 1015)
#define V4L2_CID_MPEG_VIDEO_HEVC_START_CODE (V4L2_CID_CODEC_BASE + 1016)
@@ -27,6 +28,7 @@
#define V4L2_CTRL_TYPE_HEVC_SPS 0x0120
#define V4L2_CTRL_TYPE_HEVC_PPS 0x0121
#define V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS 0x0122
+#define V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX 0x0123
#define V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS 0x0124
enum v4l2_mpeg_video_hevc_decode_mode {
@@ -225,6 +227,15 @@ struct v4l2_ctrl_hevc_decode_params {
__u64 flags;
};
+struct v4l2_ctrl_hevc_scaling_matrix {
+ __u8 scaling_list_4x4[6][16];
+ __u8 scaling_list_8x8[6][64];
+ __u8 scaling_list_16x16[6][64];
+ __u8 scaling_list_32x32[2][64];
+ __u8 scaling_list_dc_coef_16x16[6];
+ __u8 scaling_list_dc_coef_32x32[2];
+};
+
/* MPEG-class control IDs specific to the Hantro driver as defined by V4L2 */
#define V4L2_CID_CODEC_HANTRO_BASE (V4L2_CTRL_CLASS_CODEC | 0x1200)
/*
diff --git a/include/media/i2c/mt9p031.h b/include/media/i2c/mt9p031.h
index 7c29c53aa988..f933cd0be8e5 100644
--- a/include/media/i2c/mt9p031.h
+++ b/include/media/i2c/mt9p031.h
@@ -10,6 +10,7 @@ struct v4l2_subdev;
* @target_freq: Pixel clock frequency
*/
struct mt9p031_platform_data {
+ unsigned int pixclk_pol:1;
int ext_freq;
int target_freq;
};
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index 09737b47881f..fea489f03d57 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -13,10 +13,11 @@
#include <linux/bitmap.h>
#include <linux/bug.h>
+#include <linux/container_of.h>
#include <linux/fwnode.h>
-#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/media.h>
+#include <linux/types.h>
/* Enums used internally at the media controller to represent graphs */
diff --git a/include/media/tuner.h b/include/media/tuner.h
index ff85d7227f91..a7796e0a3659 100644
--- a/include/media/tuner.h
+++ b/include/media/tuner.h
@@ -132,6 +132,7 @@
#define TUNER_SONY_BTF_PG472Z 89 /* PAL+SECAM */
#define TUNER_SONY_BTF_PK467Z 90 /* NTSC_JP */
#define TUNER_SONY_BTF_PB463Z 91 /* NTSC */
+#define TUNER_SI2157 92
/* tv card specific */
#define TDA9887_PRESENT (1<<0)
diff --git a/include/media/v4l2-async.h b/include/media/v4l2-async.h
index fa4901162663..13ff3ad948f4 100644
--- a/include/media/v4l2-async.h
+++ b/include/media/v4l2-async.h
@@ -123,45 +123,45 @@ struct v4l2_async_notifier {
void v4l2_async_debug_init(struct dentry *debugfs_dir);
/**
- * v4l2_async_notifier_init - Initialize a notifier.
+ * v4l2_async_nf_init - Initialize a notifier.
*
* @notifier: pointer to &struct v4l2_async_notifier
*
* This function initializes the notifier @asd_list. It must be called
* before adding a subdevice to a notifier, using one of:
- * v4l2_async_notifier_add_fwnode_remote_subdev(),
- * v4l2_async_notifier_add_fwnode_subdev(),
- * v4l2_async_notifier_add_i2c_subdev(),
- * __v4l2_async_notifier_add_subdev() or
- * v4l2_async_notifier_parse_fwnode_endpoints().
+ * v4l2_async_nf_add_fwnode_remote(),
+ * v4l2_async_nf_add_fwnode(),
+ * v4l2_async_nf_add_i2c(),
+ * __v4l2_async_nf_add_subdev() or
+ * v4l2_async_nf_parse_fwnode_endpoints().
*/
-void v4l2_async_notifier_init(struct v4l2_async_notifier *notifier);
+void v4l2_async_nf_init(struct v4l2_async_notifier *notifier);
/**
- * __v4l2_async_notifier_add_subdev - Add an async subdev to the
+ * __v4l2_async_nf_add_subdev - Add an async subdev to the
* notifier's master asd list.
*
* @notifier: pointer to &struct v4l2_async_notifier
* @asd: pointer to &struct v4l2_async_subdev
*
* \warning: Drivers should avoid using this function and instead use one of:
- * v4l2_async_notifier_add_fwnode_subdev(),
- * v4l2_async_notifier_add_fwnode_remote_subdev() or
- * v4l2_async_notifier_add_i2c_subdev().
+ * v4l2_async_nf_add_fwnode(),
+ * v4l2_async_nf_add_fwnode_remote() or
+ * v4l2_async_nf_add_i2c().
*
* Call this function before registering a notifier to link the provided @asd to
* the notifiers master @asd_list. The @asd must be allocated with k*alloc() as
* it will be freed by the framework when the notifier is destroyed.
*/
-int __v4l2_async_notifier_add_subdev(struct v4l2_async_notifier *notifier,
- struct v4l2_async_subdev *asd);
+int __v4l2_async_nf_add_subdev(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_subdev *asd);
struct v4l2_async_subdev *
-__v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
- struct fwnode_handle *fwnode,
- unsigned int asd_struct_size);
+__v4l2_async_nf_add_fwnode(struct v4l2_async_notifier *notifier,
+ struct fwnode_handle *fwnode,
+ unsigned int asd_struct_size);
/**
- * v4l2_async_notifier_add_fwnode_subdev - Allocate and add a fwnode async
+ * v4l2_async_nf_add_fwnode - Allocate and add a fwnode async
* subdev to the notifier's master asd_list.
*
* @notifier: pointer to &struct v4l2_async_notifier
@@ -175,16 +175,15 @@ __v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
* notifiers @asd_list. The function also gets a reference of the fwnode which
* is released later at notifier cleanup time.
*/
-#define v4l2_async_notifier_add_fwnode_subdev(notifier, fwnode, type) \
- ((type *)__v4l2_async_notifier_add_fwnode_subdev(notifier, fwnode, \
- sizeof(type)))
+#define v4l2_async_nf_add_fwnode(notifier, fwnode, type) \
+ ((type *)__v4l2_async_nf_add_fwnode(notifier, fwnode, sizeof(type)))
struct v4l2_async_subdev *
-__v4l2_async_notifier_add_fwnode_remote_subdev(struct v4l2_async_notifier *notif,
- struct fwnode_handle *endpoint,
- unsigned int asd_struct_size);
+__v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif,
+ struct fwnode_handle *endpoint,
+ unsigned int asd_struct_size);
/**
- * v4l2_async_notifier_add_fwnode_remote_subdev - Allocate and add a fwnode
+ * v4l2_async_nf_add_fwnode_remote - Allocate and add a fwnode
* remote async subdev to the
* notifier's master asd_list.
*
@@ -200,20 +199,18 @@ __v4l2_async_notifier_add_fwnode_remote_subdev(struct v4l2_async_notifier *notif
* function also gets a reference of the fwnode which is released later at
* notifier cleanup time.
*
- * This is just like v4l2_async_notifier_add_fwnode_subdev(), but with the
+ * This is just like v4l2_async_nf_add_fwnode(), but with the
* exception that the fwnode refers to a local endpoint, not the remote one.
*/
-#define v4l2_async_notifier_add_fwnode_remote_subdev(notifier, ep, type) \
- ((type *) \
- __v4l2_async_notifier_add_fwnode_remote_subdev(notifier, ep, \
- sizeof(type)))
+#define v4l2_async_nf_add_fwnode_remote(notifier, ep, type) \
+ ((type *)__v4l2_async_nf_add_fwnode_remote(notifier, ep, sizeof(type)))
struct v4l2_async_subdev *
-__v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
- int adapter_id, unsigned short address,
- unsigned int asd_struct_size);
+__v4l2_async_nf_add_i2c(struct v4l2_async_notifier *notifier,
+ int adapter_id, unsigned short address,
+ unsigned int asd_struct_size);
/**
- * v4l2_async_notifier_add_i2c_subdev - Allocate and add an i2c async
+ * v4l2_async_nf_add_i2c - Allocate and add an i2c async
* subdev to the notifier's master asd_list.
*
* @notifier: pointer to &struct v4l2_async_notifier
@@ -223,59 +220,59 @@ __v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
* v4l2_async_subdev shall be the first member of the driver's async
* sub-device struct, i.e. both begin at the same memory address.
*
- * Same as v4l2_async_notifier_add_fwnode_subdev() but for I2C matched
+ * Same as v4l2_async_nf_add_fwnode() but for I2C matched
* sub-devices.
*/
-#define v4l2_async_notifier_add_i2c_subdev(notifier, adapter, address, type) \
- ((type *)__v4l2_async_notifier_add_i2c_subdev(notifier, adapter, \
- address, sizeof(type)))
+#define v4l2_async_nf_add_i2c(notifier, adapter, address, type) \
+ ((type *)__v4l2_async_nf_add_i2c(notifier, adapter, address, \
+ sizeof(type)))
/**
- * v4l2_async_notifier_register - registers a subdevice asynchronous notifier
+ * v4l2_async_nf_register - registers a subdevice asynchronous notifier
*
* @v4l2_dev: pointer to &struct v4l2_device
* @notifier: pointer to &struct v4l2_async_notifier
*/
-int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
- struct v4l2_async_notifier *notifier);
+int v4l2_async_nf_register(struct v4l2_device *v4l2_dev,
+ struct v4l2_async_notifier *notifier);
/**
- * v4l2_async_subdev_notifier_register - registers a subdevice asynchronous
+ * v4l2_async_subdev_nf_register - registers a subdevice asynchronous
* notifier for a sub-device
*
* @sd: pointer to &struct v4l2_subdev
* @notifier: pointer to &struct v4l2_async_notifier
*/
-int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd,
- struct v4l2_async_notifier *notifier);
+int v4l2_async_subdev_nf_register(struct v4l2_subdev *sd,
+ struct v4l2_async_notifier *notifier);
/**
- * v4l2_async_notifier_unregister - unregisters a subdevice
+ * v4l2_async_nf_unregister - unregisters a subdevice
* asynchronous notifier
*
* @notifier: pointer to &struct v4l2_async_notifier
*/
-void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier);
+void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier);
/**
- * v4l2_async_notifier_cleanup - clean up notifier resources
+ * v4l2_async_nf_cleanup - clean up notifier resources
* @notifier: the notifier the resources of which are to be cleaned up
*
* Release memory resources related to a notifier, including the async
* sub-devices allocated for the purposes of the notifier but not the notifier
* itself. The user is responsible for calling this function to clean up the
* notifier after calling
- * v4l2_async_notifier_add_fwnode_remote_subdev(),
- * v4l2_async_notifier_add_fwnode_subdev(),
- * v4l2_async_notifier_add_i2c_subdev(),
- * __v4l2_async_notifier_add_subdev() or
- * v4l2_async_notifier_parse_fwnode_endpoints().
+ * v4l2_async_nf_add_fwnode_remote(),
+ * v4l2_async_nf_add_fwnode(),
+ * v4l2_async_nf_add_i2c(),
+ * __v4l2_async_nf_add_subdev() or
+ * v4l2_async_nf_parse_fwnode_endpoints().
*
- * There is no harm from calling v4l2_async_notifier_cleanup() in other
+ * There is no harm from calling v4l2_async_nf_cleanup() in other
* cases as long as its memory has been zeroed after it has been
* allocated.
*/
-void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier);
+void v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier);
/**
* v4l2_async_register_subdev - registers a sub-device to the asynchronous
@@ -295,7 +292,7 @@ int v4l2_async_register_subdev(struct v4l2_subdev *sd);
*
* This function is just like v4l2_async_register_subdev() with the exception
* that calling it will also parse firmware interfaces for remote references
- * using v4l2_async_notifier_parse_fwnode_sensor() and registers the
+ * using v4l2_async_nf_parse_fwnode_sensor() and registers the
* async sub-devices. The sub-device is similarly unregistered by calling
* v4l2_async_unregister_subdev().
*
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index 6a4afd4a7df2..5cf1edefb822 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -260,8 +260,7 @@ struct v4l2_file_operations {
* Only set @dev_parent if that can't be deduced from @v4l2_dev.
*/
-struct video_device
-{
+struct video_device {
#if defined(CONFIG_MEDIA_CONTROLLER)
struct media_entity entity;
struct media_intf_devnode *intf_devnode;
diff --git a/include/media/v4l2-fwnode.h b/include/media/v4l2-fwnode.h
index 7ab033b819eb..9c97f1dbd1c6 100644
--- a/include/media/v4l2-fwnode.h
+++ b/include/media/v4l2-fwnode.h
@@ -463,7 +463,7 @@ typedef int (*parse_endpoint_func)(struct device *dev,
struct v4l2_async_subdev *asd);
/**
- * v4l2_async_notifier_parse_fwnode_endpoints - Parse V4L2 fwnode endpoints in a
+ * v4l2_async_nf_parse_fwnode_endpoints - Parse V4L2 fwnode endpoints in a
* device node
* @dev: the device the endpoints of which are to be parsed
* @notifier: notifier for @dev
@@ -496,7 +496,7 @@ typedef int (*parse_endpoint_func)(struct device *dev,
* to retain that configuration, the user needs to allocate memory for it.
*
* Any notifier populated using this function must be released with a call to
- * v4l2_async_notifier_cleanup() after it has been unregistered and the async
+ * v4l2_async_nf_cleanup() after it has been unregistered and the async
* sub-devices are no longer in use, even if the function returned an error.
*
* Return: %0 on success, including when no async sub-devices are found
@@ -505,10 +505,10 @@ typedef int (*parse_endpoint_func)(struct device *dev,
* Other error codes as returned by @parse_endpoint
*/
int
-v4l2_async_notifier_parse_fwnode_endpoints(struct device *dev,
- struct v4l2_async_notifier *notifier,
- size_t asd_struct_size,
- parse_endpoint_func parse_endpoint);
+v4l2_async_nf_parse_fwnode_endpoints(struct device *dev,
+ struct v4l2_async_notifier *notifier,
+ size_t asd_struct_size,
+ parse_endpoint_func parse_endpoint);
/* Helper macros to access the connector links. */
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 12955cb460d2..2467284e5f26 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -46,6 +46,7 @@ enum vb2_memory {
struct vb2_fileio_data;
struct vb2_threadio_data;
+struct vb2_buffer;
/**
* struct vb2_mem_ops - memory handling/memory allocator operations.
@@ -53,10 +54,8 @@ struct vb2_threadio_data;
* return ERR_PTR() on failure or a pointer to allocator private,
* per-buffer data on success; the returned private structure
* will then be passed as @buf_priv argument to other ops in this
- * structure. Additional gfp_flags to use when allocating the
- * are also passed to this operation. These flags are from the
- * gfp_flags field of vb2_queue. The size argument to this function
- * shall be *page aligned*.
+ * structure. The size argument to this function shall be
+ * *page aligned*.
* @put: inform the allocator that the buffer will no longer be used;
* usually will result in the allocator freeing the buffer (if
* no other users of this buffer are present); the @buf_priv
@@ -117,31 +116,33 @@ struct vb2_threadio_data;
* map_dmabuf, unmap_dmabuf.
*/
struct vb2_mem_ops {
- void *(*alloc)(struct device *dev, unsigned long attrs,
- unsigned long size,
- enum dma_data_direction dma_dir,
- gfp_t gfp_flags);
+ void *(*alloc)(struct vb2_buffer *vb,
+ struct device *dev,
+ unsigned long size);
void (*put)(void *buf_priv);
- struct dma_buf *(*get_dmabuf)(void *buf_priv, unsigned long flags);
-
- void *(*get_userptr)(struct device *dev, unsigned long vaddr,
- unsigned long size,
- enum dma_data_direction dma_dir);
+ struct dma_buf *(*get_dmabuf)(struct vb2_buffer *vb,
+ void *buf_priv,
+ unsigned long flags);
+
+ void *(*get_userptr)(struct vb2_buffer *vb,
+ struct device *dev,
+ unsigned long vaddr,
+ unsigned long size);
void (*put_userptr)(void *buf_priv);
void (*prepare)(void *buf_priv);
void (*finish)(void *buf_priv);
- void *(*attach_dmabuf)(struct device *dev,
+ void *(*attach_dmabuf)(struct vb2_buffer *vb,
+ struct device *dev,
struct dma_buf *dbuf,
- unsigned long size,
- enum dma_data_direction dma_dir);
+ unsigned long size);
void (*detach_dmabuf)(void *buf_priv);
int (*map_dmabuf)(void *buf_priv);
void (*unmap_dmabuf)(void *buf_priv);
- void *(*vaddr)(void *buf_priv);
- void *(*cookie)(void *buf_priv);
+ void *(*vaddr)(struct vb2_buffer *vb, void *buf_priv);
+ void *(*cookie)(struct vb2_buffer *vb, void *buf_priv);
unsigned int (*num_users)(void *buf_priv);
@@ -266,10 +267,10 @@ struct vb2_buffer {
* after the 'buf_finish' op is called.
* copied_timestamp: the timestamp of this capture buffer was copied
* from an output buffer.
- * need_cache_sync_on_prepare: when set buffer's ->prepare() function
- * performs cache sync/invalidation.
- * need_cache_sync_on_finish: when set buffer's ->finish() function
- * performs cache sync/invalidation.
+ * skip_cache_sync_on_prepare: when set buffer's ->prepare() function
+ * skips cache sync/invalidation.
+ * skip_cache_sync_on_finish: when set buffer's ->finish() function
+ * skips cache sync/invalidation.
* queued_entry: entry on the queued buffers list, which holds
* all buffers queued from userspace
* done_entry: entry on the list that stores all buffers ready
@@ -280,8 +281,8 @@ struct vb2_buffer {
unsigned int synced:1;
unsigned int prepared:1;
unsigned int copied_timestamp:1;
- unsigned int need_cache_sync_on_prepare:1;
- unsigned int need_cache_sync_on_finish:1;
+ unsigned int skip_cache_sync_on_prepare:1;
+ unsigned int skip_cache_sync_on_finish:1;
struct vb2_plane planes[VB2_MAX_PLANES];
struct list_head queued_entry;
@@ -503,6 +504,8 @@ struct vb2_buf_ops {
* @allow_cache_hints: when set user-space can pass cache management hints in
* order to skip cache flush/invalidation on ->prepare() or/and
* ->finish().
+ * @non_coherent_mem: when set queue will attempt to allocate buffers using
+ * non-coherent memory.
* @lock: pointer to a mutex that protects the &struct vb2_queue. The
* driver can set this to a mutex to let the v4l2 core serialize
* the queuing ioctls. If the driver wants to handle locking
@@ -582,6 +585,7 @@ struct vb2_queue {
unsigned int uses_qbuf:1;
unsigned int uses_requests:1;
unsigned int allow_cache_hints:1;
+ unsigned int non_coherent_mem:1;
struct mutex *lock;
void *owner;
@@ -747,6 +751,8 @@ void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb);
* vb2_core_reqbufs() - Initiate streaming.
* @q: pointer to &struct vb2_queue with videobuf2 queue.
* @memory: memory type, as defined by &enum vb2_memory.
+ * @flags: auxiliary queue/buffer management flags. Currently, the only
+ * used flag is %V4L2_MEMORY_FLAG_NON_COHERENT.
* @count: requested buffer count.
*
* Videobuf2 core helper to implement VIDIOC_REQBUF() operation. It is called
@@ -771,12 +777,13 @@ void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb);
* Return: returns zero on success; an error code otherwise.
*/
int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
- unsigned int *count);
+ unsigned int flags, unsigned int *count);
/**
* vb2_core_create_bufs() - Allocate buffers and any required auxiliary structs
* @q: pointer to &struct vb2_queue with videobuf2 queue.
* @memory: memory type, as defined by &enum vb2_memory.
+ * @flags: auxiliary queue/buffer management flags.
* @count: requested buffer count.
* @requested_planes: number of planes requested.
* @requested_sizes: array with the size of the planes.
@@ -794,7 +801,7 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
* Return: returns zero on success; an error code otherwise.
*/
int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
- unsigned int *count,
+ unsigned int flags, unsigned int *count,
unsigned int requested_planes,
const unsigned int requested_sizes[]);
diff --git a/include/memory/renesas-rpc-if.h b/include/memory/renesas-rpc-if.h
index e3e770f76f34..77c694a19149 100644
--- a/include/memory/renesas-rpc-if.h
+++ b/include/memory/renesas-rpc-if.h
@@ -59,6 +59,7 @@ struct rpcif_op {
struct rpcif {
struct device *dev;
+ void __iomem *base;
void __iomem *dirmap;
struct regmap *regmap;
struct reset_control *rstc;
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
index 03614de86942..9c6ec78e47a5 100644
--- a/include/net/9p/9p.h
+++ b/include/net/9p/9p.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * include/net/9p/9p.h
- *
* 9P protocol definitions.
*
* Copyright (C) 2005 by Latchesar Ionkov <lucho@ionkov.net>
@@ -32,13 +30,13 @@
*/
enum p9_debug_flags {
- P9_DEBUG_ERROR = (1<<0),
- P9_DEBUG_9P = (1<<2),
+ P9_DEBUG_ERROR = (1<<0),
+ P9_DEBUG_9P = (1<<2),
P9_DEBUG_VFS = (1<<3),
P9_DEBUG_CONV = (1<<4),
P9_DEBUG_MUX = (1<<5),
P9_DEBUG_TRANS = (1<<6),
- P9_DEBUG_SLABS = (1<<7),
+ P9_DEBUG_SLABS = (1<<7),
P9_DEBUG_FCALL = (1<<8),
P9_DEBUG_FID = (1<<9),
P9_DEBUG_PKT = (1<<10),
@@ -317,8 +315,8 @@ enum p9_qid_t {
};
/* 9P Magic Numbers */
-#define P9_NOTAG (u16)(~0)
-#define P9_NOFID (u32)(~0)
+#define P9_NOTAG ((u16)(~0))
+#define P9_NOFID ((u32)(~0))
#define P9_MAXWELEM 16
/* Minimal header size: size[4] type[1] tag[2] */
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index e1c308d8d288..ec1d1706f43c 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * include/net/9p/client.h
- *
* 9P Client Definitions
*
* Copyright (C) 2008 by Eric Van Hensbergen <ericvh@gmail.com>
@@ -23,7 +21,7 @@
* @p9_proto_2000L: 9P2000.L extension
*/
-enum p9_proto_versions{
+enum p9_proto_versions {
p9_proto_legacy,
p9_proto_2000u,
p9_proto_2000L,
@@ -219,13 +217,13 @@ struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid,
u64 request_mask);
int p9_client_mknod_dotl(struct p9_fid *oldfid, const char *name, int mode,
- dev_t rdev, kgid_t gid, struct p9_qid *);
+ dev_t rdev, kgid_t gid, struct p9_qid *qid);
int p9_client_mkdir_dotl(struct p9_fid *fid, const char *name, int mode,
- kgid_t gid, struct p9_qid *);
+ kgid_t gid, struct p9_qid *qid);
int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status);
int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *fl);
void p9_fcall_fini(struct p9_fcall *fc);
-struct p9_req_t *p9_tag_lookup(struct p9_client *, u16);
+struct p9_req_t *p9_tag_lookup(struct p9_client *c, u16 tag);
static inline void p9_req_get(struct p9_req_t *r)
{
@@ -241,14 +239,18 @@ int p9_req_put(struct p9_req_t *r);
void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status);
-int p9_parse_header(struct p9_fcall *, int32_t *, int8_t *, int16_t *, int);
-int p9stat_read(struct p9_client *, char *, int, struct p9_wstat *);
-void p9stat_free(struct p9_wstat *);
+int p9_parse_header(struct p9_fcall *pdu, int32_t *size, int8_t *type,
+ int16_t *tag, int rewind);
+int p9stat_read(struct p9_client *clnt, char *buf, int len,
+ struct p9_wstat *st);
+void p9stat_free(struct p9_wstat *stbuf);
int p9_is_proto_dotu(struct p9_client *clnt);
int p9_is_proto_dotl(struct p9_client *clnt);
-struct p9_fid *p9_client_xattrwalk(struct p9_fid *, const char *, u64 *);
-int p9_client_xattrcreate(struct p9_fid *, const char *, u64, int);
+struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid,
+ const char *attr_name, u64 *attr_size);
+int p9_client_xattrcreate(struct p9_fid *fid, const char *name,
+ u64 attr_size, int flags);
int p9_client_readlink(struct p9_fid *fid, char **target);
int p9_client_init(void);
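
With the parameters now named, a caller-side sketch of p9_parse_header() reads naturally; a minimal example (the req variable is hypothetical, not taken from net/9p):

    struct p9_fcall *pdu = &req->rc;	/* hypothetical reply buffer */
    int32_t size;
    int8_t type;
    int16_t tag;
    int err;

    /* the final argument, per its new name, rewinds the read offset */
    err = p9_parse_header(pdu, &size, &type, &tag, 1);
    if (err)
        return err;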
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index 3eb4261b2958..15a4e6a9dbf7 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * include/net/9p/transport.h
- *
* Transport Definition
*
* Copyright (C) 2005 by Latchesar Ionkov <lucho@ionkov.net>
@@ -11,6 +9,8 @@
#ifndef NET_9P_TRANSPORT_H
#define NET_9P_TRANSPORT_H
+#include <linux/module.h>
+
#define P9_DEF_MIN_RESVPORT (665U)
#define P9_DEF_MAX_RESVPORT (1023U)
@@ -40,14 +40,16 @@ struct p9_trans_module {
int maxsize; /* max message size of transport */
int def; /* this transport should be default */
struct module *owner;
- int (*create)(struct p9_client *, const char *, char *);
- void (*close) (struct p9_client *);
- int (*request) (struct p9_client *, struct p9_req_t *req);
- int (*cancel) (struct p9_client *, struct p9_req_t *req);
- int (*cancelled)(struct p9_client *, struct p9_req_t *req);
- int (*zc_request)(struct p9_client *, struct p9_req_t *,
- struct iov_iter *, struct iov_iter *, int , int, int);
- int (*show_options)(struct seq_file *, struct p9_client *);
+ int (*create)(struct p9_client *client,
+ const char *devname, char *args);
+ void (*close)(struct p9_client *client);
+ int (*request)(struct p9_client *client, struct p9_req_t *req);
+ int (*cancel)(struct p9_client *client, struct p9_req_t *req);
+ int (*cancelled)(struct p9_client *client, struct p9_req_t *req);
+ int (*zc_request)(struct p9_client *client, struct p9_req_t *req,
+ struct iov_iter *uidata, struct iov_iter *uodata,
+ int inlen, int outlen, int in_hdr_len);
+ int (*show_options)(struct seq_file *m, struct p9_client *client);
};
void v9fs_register_trans(struct p9_trans_module *m);
@@ -55,4 +57,8 @@ void v9fs_unregister_trans(struct p9_trans_module *m);
struct p9_trans_module *v9fs_get_trans_by_name(char *s);
struct p9_trans_module *v9fs_get_default_trans(void);
void v9fs_put_trans(struct p9_trans_module *m);
+
+#define MODULE_ALIAS_9P(transport) \
+ MODULE_ALIAS("9p-" transport)
+
#endif /* NET_9P_TRANSPORT_H */
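
Together with the new <linux/module.h> include, the alias macro lets each transport advertise itself for auto-loading; a hedged sketch of a hypothetical transport using it:

    static struct p9_trans_module p9_foo_trans = {
        .name    = "foo",
        .maxsize = 65536,
        .def     = 0,
        .owner   = THIS_MODULE,
        /* .create/.close/.request/.cancel callbacks omitted */
    };

    /* presumably lets a "trans=foo" mount trigger request_module("9p-foo") */
    MODULE_ALIAS_9P("foo");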
diff --git a/include/net/act_api.h b/include/net/act_api.h
index f19f7f4a463c..b5b624c7e488 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -30,13 +30,13 @@ struct tc_action {
atomic_t tcfa_bindcnt;
int tcfa_action;
struct tcf_t tcfa_tm;
- struct gnet_stats_basic_packed tcfa_bstats;
- struct gnet_stats_basic_packed tcfa_bstats_hw;
+ struct gnet_stats_basic_sync tcfa_bstats;
+ struct gnet_stats_basic_sync tcfa_bstats_hw;
struct gnet_stats_queue tcfa_qstats;
struct net_rate_estimator __rcu *tcfa_rate_est;
spinlock_t tcfa_lock;
- struct gnet_stats_basic_cpu __percpu *cpu_bstats;
- struct gnet_stats_basic_cpu __percpu *cpu_bstats_hw;
+ struct gnet_stats_basic_sync __percpu *cpu_bstats;
+ struct gnet_stats_basic_sync __percpu *cpu_bstats_hw;
struct gnet_stats_queue __percpu *cpu_qstats;
struct tc_cookie __rcu *act_cookie;
struct tcf_chain __rcu *goto_chain;
@@ -206,7 +206,7 @@ static inline void tcf_action_update_bstats(struct tc_action *a,
struct sk_buff *skb)
{
if (likely(a->cpu_bstats)) {
- bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), skb);
+ bstats_update(this_cpu_ptr(a->cpu_bstats), skb);
return;
}
spin_lock(&a->tcfa_lock);
diff --git a/include/net/amt.h b/include/net/amt.h
new file mode 100644
index 000000000000..7a4db8b903ee
--- /dev/null
+++ b/include/net/amt.h
@@ -0,0 +1,385 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2021 Taehee Yoo <ap420073@gmail.com>
+ */
+#ifndef _NET_AMT_H_
+#define _NET_AMT_H_
+
+#include <linux/siphash.h>
+#include <linux/jhash.h>
+
+enum amt_msg_type {
+ AMT_MSG_DISCOVERY = 1,
+ AMT_MSG_ADVERTISEMENT,
+ AMT_MSG_REQUEST,
+ AMT_MSG_MEMBERSHIP_QUERY,
+ AMT_MSG_MEMBERSHIP_UPDATE,
+ AMT_MSG_MULTICAST_DATA,
+ AMT_MSG_TEARDOWN,
+ __AMT_MSG_MAX,
+};
+
+#define AMT_MSG_MAX (__AMT_MSG_MAX - 1)
+
+enum amt_ops {
+ /* A*B */
+ AMT_OPS_INT,
+ /* A+B */
+ AMT_OPS_UNI,
+ /* A-B */
+ AMT_OPS_SUB,
+ /* B-A */
+ AMT_OPS_SUB_REV,
+ __AMT_OPS_MAX,
+};
+
+#define AMT_OPS_MAX (__AMT_OPS_MAX - 1)
+
+enum amt_filter {
+ AMT_FILTER_FWD,
+ AMT_FILTER_D_FWD,
+ AMT_FILTER_FWD_NEW,
+ AMT_FILTER_D_FWD_NEW,
+ AMT_FILTER_ALL,
+ AMT_FILTER_NONE_NEW,
+ AMT_FILTER_BOTH,
+ AMT_FILTER_BOTH_NEW,
+ __AMT_FILTER_MAX,
+};
+
+#define AMT_FILTER_MAX (__AMT_FILTER_MAX - 1)
+
+enum amt_act {
+ AMT_ACT_GMI,
+ AMT_ACT_GMI_ZERO,
+ AMT_ACT_GT,
+ AMT_ACT_STATUS_FWD_NEW,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ AMT_ACT_STATUS_NONE_NEW,
+ __AMT_ACT_MAX,
+};
+
+#define AMT_ACT_MAX (__AMT_ACT_MAX - 1)
+
+enum amt_status {
+ AMT_STATUS_INIT,
+ AMT_STATUS_SENT_DISCOVERY,
+ AMT_STATUS_RECEIVED_DISCOVERY,
+ AMT_STATUS_SENT_ADVERTISEMENT,
+ AMT_STATUS_RECEIVED_ADVERTISEMENT,
+ AMT_STATUS_SENT_REQUEST,
+ AMT_STATUS_RECEIVED_REQUEST,
+ AMT_STATUS_SENT_QUERY,
+ AMT_STATUS_RECEIVED_QUERY,
+ AMT_STATUS_SENT_UPDATE,
+ AMT_STATUS_RECEIVED_UPDATE,
+ __AMT_STATUS_MAX,
+};
+
+#define AMT_STATUS_MAX (__AMT_STATUS_MAX - 1)
+
+struct amt_header {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 type:4,
+ version:4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u8 version:4,
+ type:4;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __packed;
+
+struct amt_header_discovery {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u32 type:4,
+ version:4,
+ reserved:24;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u32 version:4,
+ type:4,
+ reserved:24;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be32 nonce;
+} __packed;
+
+struct amt_header_advertisement {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u32 type:4,
+ version:4,
+ reserved:24;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u32 version:4,
+ type:4,
+ reserved:24;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be32 nonce;
+ __be32 ip4;
+} __packed;
+
+struct amt_header_request {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u32 type:4,
+ version:4,
+ reserved1:7,
+ p:1,
+ reserved2:16;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u32 version:4,
+ type:4,
+ p:1,
+ reserved1:7,
+ reserved2:16;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be32 nonce;
+} __packed;
+
+struct amt_header_membership_query {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 type:4,
+ version:4,
+ reserved:6,
+ l:1,
+ g:1,
+ response_mac:48;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u64 version:4,
+ type:4,
+ g:1,
+ l:1,
+ reserved:6,
+ response_mac:48;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be32 nonce;
+} __packed;
+
+struct amt_header_membership_update {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 type:4,
+ version:4,
+ reserved:8,
+ response_mac:48;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u64 version:4,
+ type:4,
+ reserved:8,
+ response_mac:48;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be32 nonce;
+} __packed;
+
+struct amt_header_mcast_data {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u16 type:4,
+ version:4,
+ reserved:8;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u16 version:4,
+ type:4,
+ reserved:8;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __packed;
+
+struct amt_headers {
+ union {
+ struct amt_header_discovery discovery;
+ struct amt_header_advertisement advertisement;
+ struct amt_header_request request;
+ struct amt_header_membership_query query;
+ struct amt_header_membership_update update;
+ struct amt_header_mcast_data data;
+ };
+} __packed;
+
+struct amt_gw_headers {
+ union {
+ struct amt_header_discovery discovery;
+ struct amt_header_request request;
+ struct amt_header_membership_update update;
+ };
+} __packed;
+
+struct amt_relay_headers {
+ union {
+ struct amt_header_advertisement advertisement;
+ struct amt_header_membership_query query;
+ struct amt_header_mcast_data data;
+ };
+} __packed;
+
+struct amt_skb_cb {
+ struct amt_tunnel_list *tunnel;
+};
+
+struct amt_tunnel_list {
+ struct list_head list;
+ /* Protect all resources under an amt_tunnel_list */
+ spinlock_t lock;
+ struct amt_dev *amt;
+ u32 nr_groups;
+ u32 nr_sources;
+ enum amt_status status;
+ struct delayed_work gc_wq;
+ __be16 source_port;
+ __be32 ip4;
+ __be32 nonce;
+ siphash_key_t key;
+ u64 mac:48,
+ reserved:16;
+ struct rcu_head rcu;
+ struct hlist_head groups[];
+};
+
+union amt_addr {
+ __be32 ip4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr ip6;
+#endif
+};
+
+/* RFC 3810
+ *
+ * When the router is in EXCLUDE mode, the router state is represented
+ * by the notation EXCLUDE (X,Y), where X is called the "Requested List"
+ * and Y is called the "Exclude List". All sources, except those from
+ * the Exclude List, will be forwarded by the router
+ */
+enum amt_source_status {
+ AMT_SOURCE_STATUS_NONE,
+ /* Node of Requested List */
+ AMT_SOURCE_STATUS_FWD,
+ /* Node of Exclude List */
+ AMT_SOURCE_STATUS_D_FWD,
+};
+
+/* protected by gnode->lock */
+struct amt_source_node {
+ struct hlist_node node;
+ struct amt_group_node *gnode;
+ struct delayed_work source_timer;
+ union amt_addr source_addr;
+ enum amt_source_status status;
+#define AMT_SOURCE_OLD 0
+#define AMT_SOURCE_NEW 1
+ u8 flags;
+ struct rcu_head rcu;
+};
+
+/* Protected by amt_tunnel_list->lock */
+struct amt_group_node {
+ struct amt_dev *amt;
+ union amt_addr group_addr;
+ union amt_addr host_addr;
+ bool v6;
+ u8 filter_mode;
+ u32 nr_sources;
+ struct amt_tunnel_list *tunnel_list;
+ struct hlist_node node;
+ struct delayed_work group_timer;
+ struct rcu_head rcu;
+ struct hlist_head sources[];
+};
+
+struct amt_dev {
+ struct net_device *dev;
+ struct net_device *stream_dev;
+ struct net *net;
+ /* Global lock for amt device */
+ spinlock_t lock;
+ /* Used only in relay mode */
+ struct list_head tunnel_list;
+ struct gro_cells gro_cells;
+
+ /* Protected by RTNL */
+ struct delayed_work discovery_wq;
+ /* Protected by RTNL */
+ struct delayed_work req_wq;
+ /* Protected by RTNL */
+ struct delayed_work secret_wq;
+ /* AMT status */
+ enum amt_status status;
+ /* Generated key */
+ siphash_key_t key;
+ struct socket __rcu *sock;
+ u32 max_groups;
+ u32 max_sources;
+ u32 hash_buckets;
+ u32 hash_seed;
+ /* Default 128 */
+ u32 max_tunnels;
+ /* Number of tunnels currently in use */
+ u32 nr_tunnels;
+ /* Gateway or Relay mode */
+ u32 mode;
+ /* Default 2268 */
+ __be16 relay_port;
+ /* Default 2268 */
+ __be16 gw_port;
+ /* Outer local ip */
+ __be32 local_ip;
+ /* Outer remote ip */
+ __be32 remote_ip;
+ /* Outer discovery ip */
+ __be32 discovery_ip;
+ /* Only used in gateway mode */
+ __be32 nonce;
+ /* Gateway sent request and received query */
+ bool ready4;
+ bool ready6;
+ u8 req_cnt;
+ u8 qi;
+ u64 qrv;
+ u64 qri;
+ /* Used only in gateway mode */
+ u64 mac:48,
+ reserved:16;
+};
+
+#define AMT_TOS 0xc0
+#define AMT_IPHDR_OPTS 4
+#define AMT_IP6HDR_OPTS 8
+#define AMT_GC_INTERVAL (30 * 1000)
+#define AMT_MAX_GROUP 32
+#define AMT_MAX_SOURCE 128
+#define AMT_HSIZE_SHIFT 8
+#define AMT_HSIZE (1 << AMT_HSIZE_SHIFT)
+
+#define AMT_DISCOVERY_TIMEOUT 5000
+#define AMT_INIT_REQ_TIMEOUT 1
+#define AMT_INIT_QUERY_INTERVAL 125
+#define AMT_MAX_REQ_TIMEOUT 120
+#define AMT_MAX_REQ_COUNT 3
+#define AMT_SECRET_TIMEOUT 60000
+#define IANA_AMT_UDP_PORT 2268
+#define AMT_MAX_TUNNELS 128
+#define AMT_MAX_REQS 128
+#define AMT_GW_HLEN (sizeof(struct iphdr) + \
+ sizeof(struct udphdr) + \
+ sizeof(struct amt_gw_headers))
+#define AMT_RELAY_HLEN (sizeof(struct iphdr) + \
+ sizeof(struct udphdr) + \
+ sizeof(struct amt_relay_headers))
+
+static inline bool netif_is_amt(const struct net_device *dev)
+{
+ return dev->rtnl_link_ops && !strcmp(dev->rtnl_link_ops->kind, "amt");
+}
+
+static inline u64 amt_gmi(const struct amt_dev *amt)
+{
+ return ((amt->qrv * amt->qi) + amt->qri) * 1000;
+}
+
+#endif /* _NET_AMT_H_ */
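
A worked example of the amt_gmi() calculation, assuming the conventional IGMP/MLD defaults (QRV=2, QI=125 s, QRI=10 s; illustrative values, not taken from this header):

    /* GMI = ((QRV * QI) + QRI) * 1000, returned in milliseconds */
    u64 gmi = amt_gmi(amt);	/* ((2 * 125) + 10) * 1000 = 260000 ms */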
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 8b7eb46ad72d..03d409de61ad 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -304,7 +304,7 @@ extern spinlock_t ax25_list_lock;
void ax25_cb_add(ax25_cb *);
struct sock *ax25_find_listener(ax25_address *, int, struct net_device *, int);
struct sock *ax25_get_socket(ax25_address *, ax25_address *, int);
-ax25_cb *ax25_find_cb(ax25_address *, ax25_address *, ax25_digi *,
+ax25_cb *ax25_find_cb(const ax25_address *, ax25_address *, ax25_digi *,
struct net_device *);
void ax25_send_to_raw(ax25_address *, struct sk_buff *, int);
void ax25_destroy_socket(ax25_cb *);
@@ -384,10 +384,11 @@ struct ax25_linkfail {
void ax25_linkfail_register(struct ax25_linkfail *lf);
void ax25_linkfail_release(struct ax25_linkfail *lf);
-int __must_check ax25_listen_register(ax25_address *, struct net_device *);
-void ax25_listen_release(ax25_address *, struct net_device *);
+int __must_check ax25_listen_register(const ax25_address *,
+ struct net_device *);
+void ax25_listen_release(const ax25_address *, struct net_device *);
int(*ax25_protocol_function(unsigned int))(struct sk_buff *, ax25_cb *);
-int ax25_listen_mine(ax25_address *, struct net_device *);
+int ax25_listen_mine(const ax25_address *, struct net_device *);
void ax25_link_failed(ax25_cb *, int);
int ax25_protocol_is_registered(unsigned int);
@@ -401,8 +402,8 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb);
extern const struct header_ops ax25_header_ops;
/* ax25_out.c */
-ax25_cb *ax25_send_frame(struct sk_buff *, int, ax25_address *, ax25_address *,
- ax25_digi *, struct net_device *);
+ax25_cb *ax25_send_frame(struct sk_buff *, int, const ax25_address *,
+ ax25_address *, ax25_digi *, struct net_device *);
void ax25_output(ax25_cb *, int, struct sk_buff *);
void ax25_kick(ax25_cb *);
void ax25_transmit_buffer(ax25_cb *, struct sk_buff *, int);
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 9125effbf448..3271870fd85e 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -153,6 +153,30 @@ struct bt_voice {
#define BT_SCM_PKT_STATUS 0x03
+#define BT_CODEC 19
+
+struct bt_codec_caps {
+ __u8 len;
+ __u8 data[];
+} __packed;
+
+struct bt_codec {
+ __u8 id;
+ __u16 cid;
+ __u16 vid;
+ __u8 data_path;
+ __u8 num_caps;
+} __packed;
+
+struct bt_codecs {
+ __u8 num_codecs;
+ struct bt_codec codecs[];
+} __packed;
+
+#define BT_CODEC_CVSD 0x02
+#define BT_CODEC_TRANSPARENT 0x03
+#define BT_CODEC_MSBC 0x05
+
__printf(1, 2)
void bt_info(const char *fmt, ...);
__printf(1, 2)
@@ -420,6 +444,72 @@ out:
return NULL;
}
+/* Shall not be called with lock_sock held */
+static inline struct sk_buff *bt_skb_sendmsg(struct sock *sk,
+ struct msghdr *msg,
+ size_t len, size_t mtu,
+ size_t headroom, size_t tailroom)
+{
+ struct sk_buff *skb;
+ size_t size = min_t(size_t, len, mtu);
+ int err;
+
+ skb = bt_skb_send_alloc(sk, size + headroom + tailroom,
+ msg->msg_flags & MSG_DONTWAIT, &err);
+ if (!skb)
+ return ERR_PTR(err);
+
+ skb_reserve(skb, headroom);
+ skb_tailroom_reserve(skb, mtu, tailroom);
+
+ if (!copy_from_iter_full(skb_put(skb, size), size, &msg->msg_iter)) {
+ kfree_skb(skb);
+ return ERR_PTR(-EFAULT);
+ }
+
+ skb->priority = sk->sk_priority;
+
+ return skb;
+}
+
+/* Similar to bt_skb_sendmsg but can split the msg into multiple fragments
+ * according to the MTU.
+ */
+static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk,
+ struct msghdr *msg,
+ size_t len, size_t mtu,
+ size_t headroom, size_t tailroom)
+{
+ struct sk_buff *skb, **frag;
+
+ skb = bt_skb_sendmsg(sk, msg, len, mtu, headroom, tailroom);
+ if (IS_ERR_OR_NULL(skb))
+ return skb;
+
+ len -= skb->len;
+ if (!len)
+ return skb;
+
+ /* Add remaining data over MTU as continuation fragments */
+ frag = &skb_shinfo(skb)->frag_list;
+ while (len) {
+ struct sk_buff *tmp;
+
+ tmp = bt_skb_sendmsg(sk, msg, len, mtu, headroom, tailroom);
+ if (IS_ERR(tmp)) {
+ kfree_skb(skb);
+ return tmp;
+ }
+
+ len -= tmp->len;
+
+ *frag = tmp;
+ frag = &(*frag)->next;
+ }
+
+ return skb;
+}
+
int bt_to_errno(u16 code);
void hci_sock_set_flag(struct sock *sk, int nr);
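
A minimal caller sketch for the new fragmenting helper (mtu and headroom are placeholders, not values from any particular driver):

    struct sk_buff *skb;

    /* one skb is returned; data beyond mtu hangs off its frag_list */
    skb = bt_skb_sendmmsg(sk, msg, len, mtu, headroom, 0);
    if (IS_ERR(skb))
        return PTR_ERR(skb);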
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index b80415011dcd..63065bc01b76 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -330,6 +330,8 @@ enum {
HCI_ENABLE_LL_PRIVACY,
HCI_CMD_PENDING,
HCI_FORCE_NO_MITM,
+ HCI_QUALITY_REPORT,
+ HCI_OFFLOAD_CODECS_ENABLED,
__HCI_NUM_FLAGS,
};
@@ -871,6 +873,40 @@ struct hci_cp_logical_link_cancel {
__u8 flow_spec_id;
} __packed;
+#define HCI_OP_ENHANCED_SETUP_SYNC_CONN 0x043d
+struct hci_coding_format {
+ __u8 id;
+ __le16 cid;
+ __le16 vid;
+} __packed;
+
+struct hci_cp_enhanced_setup_sync_conn {
+ __le16 handle;
+ __le32 tx_bandwidth;
+ __le32 rx_bandwidth;
+ struct hci_coding_format tx_coding_format;
+ struct hci_coding_format rx_coding_format;
+ __le16 tx_codec_frame_size;
+ __le16 rx_codec_frame_size;
+ __le32 in_bandwidth;
+ __le32 out_bandwidth;
+ struct hci_coding_format in_coding_format;
+ struct hci_coding_format out_coding_format;
+ __le16 in_coded_data_size;
+ __le16 out_coded_data_size;
+ __u8 in_pcm_data_format;
+ __u8 out_pcm_data_format;
+ __u8 in_pcm_sample_payload_msb_pos;
+ __u8 out_pcm_sample_payload_msb_pos;
+ __u8 in_data_path;
+ __u8 out_data_path;
+ __u8 in_transport_unit_size;
+ __u8 out_transport_unit_size;
+ __le16 max_latency;
+ __le16 pkt_type;
+ __u8 retrans_effort;
+} __packed;
+
struct hci_rp_logical_link_cancel {
__u8 status;
__u8 phy_handle;
@@ -1250,6 +1286,14 @@ struct hci_rp_read_local_oob_ext_data {
__u8 rand256[16];
} __packed;
+#define HCI_CONFIGURE_DATA_PATH 0x0c83
+struct hci_op_configure_data_path {
+ __u8 direction;
+ __u8 data_path_id;
+ __u8 vnd_len;
+ __u8 vnd_data[];
+} __packed;
+
#define HCI_OP_READ_LOCAL_VERSION 0x1001
struct hci_rp_read_local_version {
__u8 status;
@@ -1307,6 +1351,28 @@ struct hci_rp_read_data_block_size {
} __packed;
#define HCI_OP_READ_LOCAL_CODECS 0x100b
+struct hci_std_codecs {
+ __u8 num;
+ __u8 codec[];
+} __packed;
+
+struct hci_vnd_codec {
+ /* company id */
+ __le16 cid;
+ /* vendor codec id */
+ __le16 vid;
+} __packed;
+
+struct hci_vnd_codecs {
+ __u8 num;
+ struct hci_vnd_codec codec[];
+} __packed;
+
+struct hci_rp_read_local_supported_codecs {
+ __u8 status;
+ struct hci_std_codecs std_codecs;
+ struct hci_vnd_codecs vnd_codecs;
+} __packed;
#define HCI_OP_READ_LOCAL_PAIRING_OPTS 0x100c
struct hci_rp_read_local_pairing_opts {
@@ -1315,6 +1381,54 @@ struct hci_rp_read_local_pairing_opts {
__u8 max_key_size;
} __packed;
+#define HCI_OP_READ_LOCAL_CODECS_V2 0x100d
+struct hci_std_codec_v2 {
+ __u8 id;
+ __u8 transport;
+} __packed;
+
+struct hci_std_codecs_v2 {
+ __u8 num;
+ struct hci_std_codec_v2 codec[];
+} __packed;
+
+struct hci_vnd_codec_v2 {
+ __u8 id;
+ __le16 cid;
+ __le16 vid;
+ __u8 transport;
+} __packed;
+
+struct hci_vnd_codecs_v2 {
+ __u8 num;
+ struct hci_vnd_codec_v2 codec[];
+} __packed;
+
+struct hci_rp_read_local_supported_codecs_v2 {
+ __u8 status;
+ struct hci_std_codecs_v2 std_codecs;
+ struct hci_vnd_codecs_v2 vendor_codecs;
+} __packed;
+
+#define HCI_OP_READ_LOCAL_CODEC_CAPS 0x100e
+struct hci_op_read_local_codec_caps {
+ __u8 id;
+ __le16 cid;
+ __le16 vid;
+ __u8 transport;
+ __u8 direction;
+} __packed;
+
+struct hci_codec_caps {
+ __u8 len;
+ __u8 data[];
+} __packed;
+
+struct hci_rp_read_local_codec_caps {
+ __u8 status;
+ __u8 num_caps;
+} __packed;
+
#define HCI_OP_READ_PAGE_SCAN_ACTIVITY 0x0c1b
struct hci_rp_read_page_scan_activity {
__u8 status;
@@ -2551,6 +2665,9 @@ static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb)
#define hci_iso_data_len(h) ((h) & 0x3fff)
#define hci_iso_data_flags(h) ((h) >> 14)
+/* codec transport types */
+#define HCI_TRANSPORT_SCO_ESCO 0x01
+
/* le24 support */
static inline void hci_cpu_to_le24(__u32 val, __u8 dst[3])
{
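
Since hci_rp_read_local_supported_codecs ends in two variable-length lists, the vendor list can only be located by stepping over the standard one; a hedged parsing sketch (rp and skb are hypothetical, length validation elided):

    struct hci_rp_read_local_supported_codecs *rp = (void *)skb->data;
    struct hci_vnd_codecs *vnd;

    if (rp->status)
        return;

    /* vendor codecs start immediately after the std codec id array */
    vnd = (void *)rp->std_codecs.codec + rp->std_codecs.num;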
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index a7360c8c72f8..dd8840e70e25 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -131,6 +131,17 @@ struct bdaddr_list {
u8 bdaddr_type;
};
+struct codec_list {
+ struct list_head list;
+ u8 id;
+ __u16 cid;
+ __u16 vid;
+ u8 transport;
+ u8 num_caps;
+ u32 len;
+ struct hci_codec_caps caps[];
+};
+
struct bdaddr_list_with_irk {
struct list_head list;
bdaddr_t bdaddr;
@@ -536,6 +547,7 @@ struct hci_dev {
struct list_head pend_le_conns;
struct list_head pend_le_reports;
struct list_head blocked_keys;
+ struct list_head local_codecs;
struct hci_dev_stats stat;
@@ -605,7 +617,12 @@ struct hci_dev {
int (*set_diag)(struct hci_dev *hdev, bool enable);
int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr);
void (*cmd_timeout)(struct hci_dev *hdev);
- bool (*prevent_wake)(struct hci_dev *hdev);
+ bool (*wakeup)(struct hci_dev *hdev);
+ int (*set_quality_report)(struct hci_dev *hdev, bool enable);
+ int (*get_data_path_id)(struct hci_dev *hdev, __u8 *data_path);
+ int (*get_codec_config_data)(struct hci_dev *hdev, __u8 type,
+ struct bt_codec *codec, __u8 *vnd_len,
+ __u8 **vnd_data);
};
#define HCI_PHY_HANDLE(handle) (handle & 0xff)
@@ -699,6 +716,7 @@ struct hci_conn {
struct amp_mgr *amp_mgr;
struct hci_conn *link;
+ struct bt_codec codec;
void (*connect_cfm_cb) (struct hci_conn *conn, u8 status);
void (*security_cfm_cb) (struct hci_conn *conn, u8 status);
@@ -760,6 +778,7 @@ extern struct mutex hci_cb_list_lock;
hci_dev_clear_flag(hdev, HCI_LE_ADV); \
hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);\
hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); \
+ hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); \
} while (0)
/* ----- HCI interface to upper protocols ----- */
@@ -1099,13 +1118,14 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
u16 conn_timeout,
enum conn_reasons conn_reason);
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
- u8 dst_type, u8 sec_level, u16 conn_timeout,
- u8 role, bdaddr_t *direct_rpa);
+ u8 dst_type, bool dst_resolved, u8 sec_level,
+ u16 conn_timeout, u8 role,
+ bdaddr_t *direct_rpa);
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
u8 sec_level, u8 auth_type,
enum conn_reasons conn_reason);
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
- __u16 setting);
+ __u16 setting, struct bt_codec *codec);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
@@ -1360,6 +1380,8 @@ int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
u16 scan_rsp_len, u8 *scan_rsp_data);
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);
void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired);
+u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance);
+bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance);
void hci_adv_monitors_clear(struct hci_dev *hdev);
void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor);
@@ -1442,6 +1464,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
/* Use LL Privacy based address resolution if supported */
#define use_ll_privacy(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY)
+/* Use enhanced synchronous connection if command is supported */
+#define enhanced_sco_capable(dev) ((dev)->commands[29] & 0x08)
+
/* Use ext scanning if set ext scan param and ext scan enable is supported */
#define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \
((dev)->commands[37] & 0x40))
@@ -1609,43 +1634,6 @@ static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
mutex_unlock(&hci_cb_list_lock);
}
-static inline void *eir_get_data(u8 *eir, size_t eir_len, u8 type,
- size_t *data_len)
-{
- size_t parsed = 0;
-
- if (eir_len < 2)
- return NULL;
-
- while (parsed < eir_len - 1) {
- u8 field_len = eir[0];
-
- if (field_len == 0)
- break;
-
- parsed += field_len + 1;
-
- if (parsed > eir_len)
- break;
-
- if (eir[1] != type) {
- eir += field_len + 1;
- continue;
- }
-
- /* Zero length data */
- if (field_len == 1)
- return NULL;
-
- if (data_len)
- *data_len = field_len - 1;
-
- return &eir[2];
- }
-
- return NULL;
-}
-
static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type)
{
if (addr_type != ADDR_LE_DEV_RANDOM)
@@ -1867,4 +1855,9 @@ void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
#define SCO_AIRMODE_CVSD 0x0000
#define SCO_AIRMODE_TRANSP 0x0003
+#define LOCAL_CODEC_ACL_MASK BIT(0)
+#define LOCAL_CODEC_SCO_MASK BIT(1)
+
+#define TRANSPORT_TYPE_MAX 0x04
+
#endif /* __HCI_CORE_H */
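
A sketch of how the new pieces combine on the SCO setup path (dst, setting and data_path are hypothetical; the codec choice is illustrative only):

    struct bt_codec codec = { .id = BT_CODEC_CVSD };
    struct hci_conn *conn;

    if (enhanced_sco_capable(hdev))
        codec.data_path = data_path;	/* hypothetical offload path id */

    conn = hci_connect_sco(hdev, SCO_LINK, &dst, setting, &codec);
    if (IS_ERR(conn))
        return PTR_ERR(conn);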
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 40296ed976a9..4202c609bb0b 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -130,7 +130,8 @@ static inline void skb_mark_napi_id(struct sk_buff *skb,
static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
- WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
+ if (unlikely(READ_ONCE(sk->sk_napi_id) != skb->napi_id))
+ WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
sk_rx_queue_set(sk, skb);
}
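
The added read is the usual guard against dirtying a shared cache line with a redundant store; generically:

    /* skip the store (and the resulting cache-line invalidation)
     * when the value is already up to date
     */
    if (READ_ONCE(*slot) != val)
        WRITE_ONCE(*slot, val);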
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 62dd8422e0dc..423f97b982ff 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -740,6 +740,22 @@ struct cfg80211_tid_config {
};
/**
+ * struct cfg80211_fils_aad - FILS AAD data
+ * @macaddr: STA MAC address
+ * @kek: FILS KEK
+ * @kek_len: FILS KEK length
+ * @snonce: STA Nonce
+ * @anonce: AP Nonce
+ */
+struct cfg80211_fils_aad {
+ const u8 *macaddr;
+ const u8 *kek;
+ u8 kek_len;
+ const u8 *snonce;
+ const u8 *anonce;
+};
+
+/**
* cfg80211_get_chandef_type - return old channel type from chandef
* @chandef: the channel definition
*
@@ -1041,6 +1057,36 @@ struct cfg80211_crypto_settings {
};
/**
+ * struct cfg80211_mbssid_config - AP settings for multi bssid
+ *
+ * @tx_wdev: pointer to the transmitted interface in the MBSSID set
+ * @index: index of this AP in the multi bssid group.
+ * @ema: set to true if the beacons should be sent out in EMA mode.
+ */
+struct cfg80211_mbssid_config {
+ struct wireless_dev *tx_wdev;
+ u8 index;
+ bool ema;
+};
+
+/**
+ * struct cfg80211_mbssid_elems - Multiple BSSID elements
+ *
+ * @cnt: Number of elements in array %elems.
+ *
+ * @elem: Array of multiple BSSID element(s) to be added into Beacon frames.
+ * @elem.data: Data for multiple BSSID elements.
+ * @elem.len: Length of data.
+ */
+struct cfg80211_mbssid_elems {
+ u8 cnt;
+ struct {
+ const u8 *data;
+ size_t len;
+ } elem[];
+};
+
+/**
* struct cfg80211_beacon_data - beacon data
* @head: head portion of beacon (before TIM IE)
* or %NULL if not changed
@@ -1058,6 +1104,7 @@ struct cfg80211_crypto_settings {
* @assocresp_ies_len: length of assocresp_ies in octets
* @probe_resp_len: length of probe response template (@probe_resp)
* @probe_resp: probe response template (AP mode only)
+ * @mbssid_ies: multiple BSSID elements
* @ftm_responder: enable FTM responder functionality; -1 for no change
* (which also implies no change in LCI/civic location data)
* @lci: Measurement Report element content, starting with Measurement Token
@@ -1075,6 +1122,7 @@ struct cfg80211_beacon_data {
const u8 *probe_resp;
const u8 *lci;
const u8 *civicloc;
+ struct cfg80211_mbssid_elems *mbssid_ies;
s8 ftm_responder;
size_t head_len, tail_len;
@@ -1189,6 +1237,7 @@ enum cfg80211_ap_settings_flags {
* @he_oper: HE operation IE (or %NULL if HE isn't enabled)
* @fils_discovery: FILS discovery transmission parameters
* @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
+ * @mbssid_config: AP settings for multiple bssid
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@ -1221,6 +1270,7 @@ struct cfg80211_ap_settings {
struct cfg80211_he_bss_color he_bss_color;
struct cfg80211_fils_discovery fils_discovery;
struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
+ struct cfg80211_mbssid_config mbssid_config;
};
/**
@@ -4018,6 +4068,10 @@ struct mgmt_frame_regs {
* @set_sar_specs: Update the SAR (TX power) settings.
*
* @color_change: Initiate a color change.
+ *
+ * @set_fils_aad: Set FILS AAD data to the AP driver so that the driver can use
+ * them to decrypt (Re)Association Request and encrypt (Re)Association
+ * Response frames.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -4348,6 +4402,8 @@ struct cfg80211_ops {
int (*color_change)(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_color_change_settings *params);
+ int (*set_fils_aad)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_fils_aad *fils_aad);
};
/*
@@ -4981,6 +5037,13 @@ struct wiphy_iftype_akm_suites {
* %NL80211_TID_CONFIG_ATTR_RETRY_LONG attributes
* @sar_capa: SAR control capabilities
* @rfkill: a pointer to the rfkill structure
+ *
+ * @mbssid_max_interfaces: maximum number of interfaces supported by the driver
+ * in a multiple BSSID set. This field must be set to a non-zero value
+ * by the driver to advertise MBSSID support.
+ * @ema_max_profile_periodicity: maximum profile periodicity supported by
+ * the driver. Setting this field to a non-zero value indicates that the
+ * driver supports enhanced multi-BSSID advertisements (EMA AP).
*/
struct wiphy {
struct mutex mtx;
@@ -5125,6 +5188,9 @@ struct wiphy {
struct rfkill *rfkill;
+ u8 mbssid_max_interfaces;
+ u8 ema_max_profile_periodicity;
+
char priv[] __aligned(NETDEV_ALIGN);
};
@@ -5376,7 +5442,6 @@ static inline void wiphy_unlock(struct wiphy *wiphy)
 * netdev and may otherwise be used by driver read-only, will be updated
* by cfg80211 on change_interface
* @mgmt_registrations: list of registrations for management frames
- * @mgmt_registrations_lock: lock for the list
* @mgmt_registrations_need_update: mgmt registrations were updated,
* need to propagate the update to the driver
* @mtx: mutex used to lock data in this struct, may be used by drivers
@@ -5423,7 +5488,6 @@ struct wireless_dev {
u32 identifier;
struct list_head mgmt_registrations;
- spinlock_t mgmt_registrations_lock;
u8 mgmt_registrations_need_update:1;
struct mutex mtx;
@@ -5492,7 +5556,7 @@ struct wireless_dev {
unsigned long unprot_beacon_reported;
};
-static inline u8 *wdev_address(struct wireless_dev *wdev)
+static inline const u8 *wdev_address(struct wireless_dev *wdev)
{
if (wdev->netdev)
return wdev->netdev->dev_addr;
@@ -6311,6 +6375,17 @@ static inline void cfg80211_gen_new_bssid(const u8 *bssid, u8 max_bssid,
}
/**
+ * cfg80211_get_ies_channel_number - returns the channel number from ies
+ * @ie: IEs
+ * @ielen: length of IEs
+ * @band: enum nl80211_band of the channel
+ *
+ * Returns the channel number, or -1 if none could be determined.
+ */
+int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen,
+ enum nl80211_band band);
+
+/**
* cfg80211_is_element_inherited - returns if element ID should be inherited
* @element: element to check
* @non_inherit_element: non inheritance element
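
A hedged sketch of a driver sizing and filling cfg80211_mbssid_elems (cnt, ie and ie_len are hypothetical):

    struct cfg80211_mbssid_elems *elems;

    elems = kzalloc(struct_size(elems, elem, cnt), GFP_KERNEL);
    if (!elems)
        return -ENOMEM;

    elems->cnt = cnt;
    elems->elem[0].data = ie;
    elems->elem[0].len = ie_len;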
diff --git a/include/net/codel.h b/include/net/codel.h
index a6e428f80135..a6c9e34e62b8 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -102,6 +102,9 @@ static inline u32 codel_time_to_us(codel_time_t val)
* @interval: width of moving time window
* @mtu: device mtu, or minimal queue backlog in bytes.
* @ecn: is Explicit Congestion Notification enabled
+ * @ce_threshold_selector: apply ce_threshold to packets matching this value
+ * in the diffserv/ECN byte of the IP header
+ * @ce_threshold_mask: mask to apply to ce_threshold_selector comparison
*/
struct codel_params {
codel_time_t target;
@@ -109,6 +112,8 @@ struct codel_params {
codel_time_t interval;
u32 mtu;
bool ecn;
+ u8 ce_threshold_selector;
+ u8 ce_threshold_mask;
};
/**
diff --git a/include/net/codel_impl.h b/include/net/codel_impl.h
index d289b91dcd65..137d40d8cbeb 100644
--- a/include/net/codel_impl.h
+++ b/include/net/codel_impl.h
@@ -54,6 +54,8 @@ static void codel_params_init(struct codel_params *params)
params->interval = MS2TIME(100);
params->target = MS2TIME(5);
params->ce_threshold = CODEL_DISABLED_THRESHOLD;
+ params->ce_threshold_mask = 0;
+ params->ce_threshold_selector = 0;
params->ecn = false;
}
@@ -246,9 +248,19 @@ static struct sk_buff *codel_dequeue(void *ctx,
vars->rec_inv_sqrt);
}
end:
- if (skb && codel_time_after(vars->ldelay, params->ce_threshold) &&
- INET_ECN_set_ce(skb))
- stats->ce_mark++;
+ if (skb && codel_time_after(vars->ldelay, params->ce_threshold)) {
+ bool set_ce = true;
+
+ if (params->ce_threshold_mask) {
+ int dsfield = skb_get_dsfield(skb);
+
+ set_ce = (dsfield >= 0 &&
+ (((u8)dsfield & params->ce_threshold_mask) ==
+ params->ce_threshold_selector));
+ }
+ if (set_ce && INET_ECN_set_ce(skb))
+ stats->ce_mark++;
+ }
return skb;
}
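
One intended configuration for the new selector/mask pair, assuming the usual L4S-style setup, is to confine shallow-threshold CE marking to ECT(1) traffic:

    /* compare only the 2-bit ECN field; mark when it equals ECT(1) */
    params->ce_threshold_mask = INET_ECN_MASK;		/* 0x3 */
    params->ce_threshold_selector = INET_ECN_ECT_1;	/* 0x1 */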
diff --git a/include/net/datalink.h b/include/net/datalink.h
index a9663229b913..d9b7faaa539f 100644
--- a/include/net/datalink.h
+++ b/include/net/datalink.h
@@ -12,7 +12,7 @@ struct datalink_proto {
int (*rcvfunc)(struct sk_buff *, struct net_device *,
struct packet_type *, struct net_device *);
int (*request)(struct datalink_proto *, struct sk_buff *,
- unsigned char *);
+ const unsigned char *);
struct list_head node;
};
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 154cf0dbca37..aab3d007c577 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -21,45 +21,7 @@
#include <linux/xarray.h>
#include <linux/firmware.h>
-#define DEVLINK_RELOAD_STATS_ARRAY_SIZE \
- (__DEVLINK_RELOAD_LIMIT_MAX * __DEVLINK_RELOAD_ACTION_MAX)
-
-struct devlink_dev_stats {
- u32 reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
- u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
-};
-
-struct devlink_ops;
-
-struct devlink {
- u32 index;
- struct list_head port_list;
- struct list_head rate_list;
- struct list_head sb_list;
- struct list_head dpipe_table_list;
- struct list_head resource_list;
- struct list_head param_list;
- struct list_head region_list;
- struct list_head reporter_list;
- struct mutex reporters_lock; /* protects reporter_list */
- struct devlink_dpipe_headers *dpipe_headers;
- struct list_head trap_list;
- struct list_head trap_group_list;
- struct list_head trap_policer_list;
- const struct devlink_ops *ops;
- struct xarray snapshot_ids;
- struct devlink_dev_stats stats;
- struct device *dev;
- possible_net_t _net;
- struct mutex lock; /* Serializes access to devlink instance specific objects such as
- * port, sb, dpipe, resource, params, region, traps and more.
- */
- u8 reload_failed:1,
- reload_enabled:1;
- refcount_t refcount;
- struct completion comp;
- char priv[0] __aligned(NETDEV_ALIGN);
-};
+struct devlink;
struct devlink_port_phys_attrs {
u32 port_number; /* Same value as "split group".
@@ -506,7 +468,6 @@ struct devlink_param_item {
const struct devlink_param *param;
union devlink_param_value driverinit_value;
bool driverinit_value_valid;
- bool published;
};
enum devlink_param_generic_id {
@@ -1224,6 +1185,11 @@ enum devlink_trap_group_generic_id {
.min_burst = _min_burst, \
}
+enum {
+ /* device supports reload operations */
+ DEVLINK_F_RELOAD = 1UL << 0,
+};
+
struct devlink_ops {
/**
* @supported_flash_update_params:
@@ -1520,34 +1486,9 @@ struct devlink_ops {
struct netlink_ext_ack *extack);
};
-static inline void *devlink_priv(struct devlink *devlink)
-{
- BUG_ON(!devlink);
- return &devlink->priv;
-}
-
-static inline struct devlink *priv_to_devlink(void *priv)
-{
- BUG_ON(!priv);
- return container_of(priv, struct devlink, priv);
-}
-
-static inline struct devlink_port *
-netdev_to_devlink_port(struct net_device *dev)
-{
- if (dev->netdev_ops->ndo_get_devlink_port)
- return dev->netdev_ops->ndo_get_devlink_port(dev);
- return NULL;
-}
-
-static inline struct devlink *netdev_to_devlink(struct net_device *dev)
-{
- struct devlink_port *devlink_port = netdev_to_devlink_port(dev);
-
- if (devlink_port)
- return devlink_port->devlink;
- return NULL;
-}
+void *devlink_priv(struct devlink *devlink);
+struct devlink *priv_to_devlink(void *priv);
+struct device *devlink_to_dev(const struct devlink *devlink);
struct ib_device;
@@ -1566,10 +1507,9 @@ static inline struct devlink *devlink_alloc(const struct devlink_ops *ops,
{
return devlink_alloc_ns(ops, priv_size, &init_net, dev);
}
-int devlink_register(struct devlink *devlink);
+void devlink_set_features(struct devlink *devlink, u64 features);
+void devlink_register(struct devlink *devlink);
void devlink_unregister(struct devlink *devlink);
-void devlink_reload_enable(struct devlink *devlink);
-void devlink_reload_disable(struct devlink *devlink);
void devlink_free(struct devlink *devlink);
int devlink_port_register(struct devlink *devlink,
struct devlink_port *devlink_port,
@@ -1651,34 +1591,11 @@ int devlink_param_register(struct devlink *devlink,
const struct devlink_param *param);
void devlink_param_unregister(struct devlink *devlink,
const struct devlink_param *param);
-void devlink_params_publish(struct devlink *devlink);
-void devlink_params_unpublish(struct devlink *devlink);
-void devlink_param_publish(struct devlink *devlink,
- const struct devlink_param *param);
-void devlink_param_unpublish(struct devlink *devlink,
- const struct devlink_param *param);
-int devlink_port_params_register(struct devlink_port *devlink_port,
- const struct devlink_param *params,
- size_t params_count);
-void devlink_port_params_unregister(struct devlink_port *devlink_port,
- const struct devlink_param *params,
- size_t params_count);
int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
union devlink_param_value *init_val);
int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
union devlink_param_value init_val);
-int
-devlink_port_param_driverinit_value_get(struct devlink_port *devlink_port,
- u32 param_id,
- union devlink_param_value *init_val);
-int devlink_port_param_driverinit_value_set(struct devlink_port *devlink_port,
- u32 param_id,
- union devlink_param_value init_val);
void devlink_param_value_changed(struct devlink *devlink, u32 param_id);
-void devlink_port_param_value_changed(struct devlink_port *devlink_port,
- u32 param_id);
-void devlink_param_value_str_fill(union devlink_param_value *dst_val,
- const char *src);
struct devlink_region *
devlink_region_create(struct devlink *devlink,
const struct devlink_region_ops *ops,
@@ -1723,10 +1640,7 @@ int devlink_fmsg_binary_pair_nest_start(struct devlink_fmsg *fmsg,
const char *name);
int devlink_fmsg_binary_pair_nest_end(struct devlink_fmsg *fmsg);
-int devlink_fmsg_bool_put(struct devlink_fmsg *fmsg, bool value);
-int devlink_fmsg_u8_put(struct devlink_fmsg *fmsg, u8 value);
int devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value);
-int devlink_fmsg_u64_put(struct devlink_fmsg *fmsg, u64 value);
int devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value);
int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value,
u16 value_len);
@@ -1812,9 +1726,12 @@ devlink_trap_policers_unregister(struct devlink *devlink,
#if IS_ENABLED(CONFIG_NET_DEVLINK)
-void devlink_compat_running_version(struct net_device *dev,
+struct devlink *__must_check devlink_try_get(struct devlink *devlink);
+void devlink_put(struct devlink *devlink);
+
+void devlink_compat_running_version(struct devlink *devlink,
char *buf, size_t len);
-int devlink_compat_flash_update(struct net_device *dev, const char *file_name);
+int devlink_compat_flash_update(struct devlink *devlink, const char *file_name);
int devlink_compat_phys_port_name_get(struct net_device *dev,
char *name, size_t len);
int devlink_compat_switch_id_get(struct net_device *dev,
@@ -1822,13 +1739,22 @@ int devlink_compat_switch_id_get(struct net_device *dev,
#else
+static inline struct devlink *devlink_try_get(struct devlink *devlink)
+{
+ return NULL;
+}
+
+static inline void devlink_put(struct devlink *devlink)
+{
+}
+
static inline void
-devlink_compat_running_version(struct net_device *dev, char *buf, size_t len)
+devlink_compat_running_version(struct devlink *devlink, char *buf, size_t len)
{
}
static inline int
-devlink_compat_flash_update(struct net_device *dev, const char *file_name)
+devlink_compat_flash_update(struct devlink *devlink, const char *file_name)
{
return -EOPNOTSUPP;
}
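
Under the reworked API the driver-side flow becomes roughly the following (my_ops and struct my_priv are hypothetical):

    struct devlink *dl;

    dl = devlink_alloc(&my_ops, sizeof(struct my_priv), dev);
    if (!dl)
        return -ENOMEM;

    /* replaces the old devlink_reload_enable() call */
    devlink_set_features(dl, DEVLINK_F_RELOAD);
    devlink_register(dl);	/* now returns void and cannot fail */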
diff --git a/include/net/dn.h b/include/net/dn.h
index 56ab0726c641..ba9655b0098a 100644
--- a/include/net/dn.h
+++ b/include/net/dn.h
@@ -166,7 +166,7 @@ struct dn_skb_cb {
int iif;
};
-static inline __le16 dn_eth2dn(unsigned char *ethaddr)
+static inline __le16 dn_eth2dn(const unsigned char *ethaddr)
{
return get_unaligned((__le16 *)(ethaddr + 4));
}
diff --git a/include/net/dsa.h b/include/net/dsa.h
index d784e76113b8..eff5c44ba377 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -51,6 +51,7 @@ struct phylink_link_state;
#define DSA_TAG_PROTO_SEVILLE_VALUE 21
#define DSA_TAG_PROTO_BRCM_LEGACY_VALUE 22
#define DSA_TAG_PROTO_SJA1110_VALUE 23
+#define DSA_TAG_PROTO_RTL8_4_VALUE 24
enum dsa_tag_protocol {
DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE,
@@ -77,6 +78,7 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_OCELOT_8021Q = DSA_TAG_PROTO_OCELOT_8021Q_VALUE,
DSA_TAG_PROTO_SEVILLE = DSA_TAG_PROTO_SEVILLE_VALUE,
DSA_TAG_PROTO_SJA1110 = DSA_TAG_PROTO_SJA1110_VALUE,
+ DSA_TAG_PROTO_RTL8_4 = DSA_TAG_PROTO_RTL8_4_VALUE,
};
struct dsa_switch;
@@ -285,6 +287,7 @@ struct dsa_port {
/* List of MAC addresses that must be forwarded on this port.
* These are only valid on CPU ports and DSA links.
*/
+ struct mutex addr_lists_lock;
struct list_head fdbs;
struct list_head mdbs;
@@ -472,14 +475,41 @@ static inline bool dsa_is_user_port(struct dsa_switch *ds, int p)
return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_USER;
}
+#define dsa_tree_for_each_user_port(_dp, _dst) \
+ list_for_each_entry((_dp), &(_dst)->ports, list) \
+ if (dsa_port_is_user((_dp)))
+
+#define dsa_switch_for_each_port(_dp, _ds) \
+ list_for_each_entry((_dp), &(_ds)->dst->ports, list) \
+ if ((_dp)->ds == (_ds))
+
+#define dsa_switch_for_each_port_safe(_dp, _next, _ds) \
+ list_for_each_entry_safe((_dp), (_next), &(_ds)->dst->ports, list) \
+ if ((_dp)->ds == (_ds))
+
+#define dsa_switch_for_each_port_continue_reverse(_dp, _ds) \
+ list_for_each_entry_continue_reverse((_dp), &(_ds)->dst->ports, list) \
+ if ((_dp)->ds == (_ds))
+
+#define dsa_switch_for_each_available_port(_dp, _ds) \
+ dsa_switch_for_each_port((_dp), (_ds)) \
+ if (!dsa_port_is_unused((_dp)))
+
+#define dsa_switch_for_each_user_port(_dp, _ds) \
+ dsa_switch_for_each_port((_dp), (_ds)) \
+ if (dsa_port_is_user((_dp)))
+
+#define dsa_switch_for_each_cpu_port(_dp, _ds) \
+ dsa_switch_for_each_port((_dp), (_ds)) \
+ if (dsa_port_is_cpu((_dp)))
+
static inline u32 dsa_user_ports(struct dsa_switch *ds)
{
+ struct dsa_port *dp;
u32 mask = 0;
- int p;
- for (p = 0; p < ds->num_ports; p++)
- if (dsa_is_user_port(ds, p))
- mask |= BIT(p);
+ dsa_switch_for_each_user_port(dp, ds)
+ mask |= BIT(dp->index);
return mask;
}
@@ -615,6 +645,8 @@ struct dsa_switch_ops {
/*
* PHYLINK integration
*/
+ void (*phylink_get_interfaces)(struct dsa_switch *ds, int port,
+ unsigned long *supported_interfaces);
void (*phylink_validate)(struct dsa_switch *ds, int port,
unsigned long *supported,
struct phylink_link_state *state);
@@ -645,6 +677,12 @@ struct dsa_switch_ops {
int (*get_sset_count)(struct dsa_switch *ds, int port, int sset);
void (*get_ethtool_phy_stats)(struct dsa_switch *ds,
int port, uint64_t *data);
+ void (*get_eth_phy_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_eth_phy_stats *phy_stats);
+ void (*get_eth_mac_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats);
+ void (*get_eth_ctrl_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats);
void (*get_stats64)(struct dsa_switch *ds, int port,
struct rtnl_link_stats64 *s);
void (*self_test)(struct dsa_switch *ds, int port,
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index ffd386ea0dbb..aa33e1092e2c 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -287,6 +287,7 @@ enum flow_dissector_key_id {
#define FLOW_DISSECTOR_F_PARSE_1ST_FRAG BIT(0)
#define FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL BIT(1)
#define FLOW_DISSECTOR_F_STOP_AT_ENCAP BIT(2)
+#define FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP BIT(3)
struct flow_dissector_key {
enum flow_dissector_key_id key_id;
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index 1424e02cef90..7aa2b8e1fb29 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -7,14 +7,17 @@
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
-/* Note: this used to be in include/uapi/linux/gen_stats.h */
-struct gnet_stats_basic_packed {
- __u64 bytes;
- __u64 packets;
-};
-
-struct gnet_stats_basic_cpu {
- struct gnet_stats_basic_packed bstats;
+/* Throughput stats.
+ * Must be initialized beforehand with gnet_stats_basic_sync_init().
+ *
+ * If no reads can ever occur parallel to writes (e.g. stack-allocated
+ * bstats), then the internal stat values can be written to and read
+ * from directly. Otherwise, use _bstats_set/update() for writes and
+ * gnet_stats_add_basic() for reads.
+ */
+struct gnet_stats_basic_sync {
+ u64_stats_t bytes;
+ u64_stats_t packets;
struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));
@@ -34,6 +37,7 @@ struct gnet_dump {
struct tc_stats tc_stats;
};
+void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b);
int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
struct gnet_dump *d, int padattr);
@@ -42,41 +46,38 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
spinlock_t *lock, struct gnet_dump *d,
int padattr);
-int gnet_stats_copy_basic(const seqcount_t *running,
- struct gnet_dump *d,
- struct gnet_stats_basic_cpu __percpu *cpu,
- struct gnet_stats_basic_packed *b);
-void __gnet_stats_copy_basic(const seqcount_t *running,
- struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_basic_cpu __percpu *cpu,
- struct gnet_stats_basic_packed *b);
-int gnet_stats_copy_basic_hw(const seqcount_t *running,
- struct gnet_dump *d,
- struct gnet_stats_basic_cpu __percpu *cpu,
- struct gnet_stats_basic_packed *b);
+int gnet_stats_copy_basic(struct gnet_dump *d,
+ struct gnet_stats_basic_sync __percpu *cpu,
+ struct gnet_stats_basic_sync *b, bool running);
+void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats,
+ struct gnet_stats_basic_sync __percpu *cpu,
+ struct gnet_stats_basic_sync *b, bool running);
+int gnet_stats_copy_basic_hw(struct gnet_dump *d,
+ struct gnet_stats_basic_sync __percpu *cpu,
+ struct gnet_stats_basic_sync *b, bool running);
int gnet_stats_copy_rate_est(struct gnet_dump *d,
struct net_rate_estimator __rcu **ptr);
int gnet_stats_copy_queue(struct gnet_dump *d,
struct gnet_stats_queue __percpu *cpu_q,
struct gnet_stats_queue *q, __u32 qlen);
-void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
- const struct gnet_stats_queue __percpu *cpu_q,
- const struct gnet_stats_queue *q, __u32 qlen);
+void gnet_stats_add_queue(struct gnet_stats_queue *qstats,
+ const struct gnet_stats_queue __percpu *cpu_q,
+ const struct gnet_stats_queue *q);
int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
int gnet_stats_finish_copy(struct gnet_dump *d);
-int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
+ struct gnet_stats_basic_sync __percpu *cpu_bstats,
struct net_rate_estimator __rcu **rate_est,
spinlock_t *lock,
- seqcount_t *running, struct nlattr *opt);
+ bool running, struct nlattr *opt);
void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
-int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+int gen_replace_estimator(struct gnet_stats_basic_sync *bstats,
+ struct gnet_stats_basic_sync __percpu *cpu_bstats,
struct net_rate_estimator __rcu **ptr,
spinlock_t *lock,
- seqcount_t *running, struct nlattr *opt);
+ bool running, struct nlattr *opt);
bool gen_estimator_active(struct net_rate_estimator __rcu **ptr);
bool gen_estimator_read(struct net_rate_estimator __rcu **ptr,
struct gnet_stats_rate_est64 *sample);
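
A minimal lifecycle sketch for the new type, assuming the u64_stats helpers keep their current form:

    struct gnet_stats_basic_sync bstats;

    gnet_stats_basic_sync_init(&bstats);	/* zeroes counters, seeds syncp */

    /* writer side: update under the u64_stats sequence */
    u64_stats_update_begin(&bstats.syncp);
    u64_stats_add(&bstats.bytes, skb->len);
    u64_stats_inc(&bstats.packets);
    u64_stats_update_end(&bstats.syncp);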
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index b06c2d02ec84..fa6a87246a7b 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -289,7 +289,7 @@ static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk)
{
/* The below has to be done to allow calling inet_csk_destroy_sock */
sock_set_flag(sk, SOCK_DEAD);
- percpu_counter_inc(sk->sk_prot->orphan_count);
+ this_cpu_inc(*sk->sk_prot->orphan_count);
}
void inet_csk_destroy_sock(struct sock *sk);
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index ba77f47ef61e..ea32393464a2 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -188,6 +188,23 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
return 0;
}
+static inline int skb_get_dsfield(struct sk_buff *skb)
+{
+ switch (skb_protocol(skb, true)) {
+ case cpu_to_be16(ETH_P_IP):
+ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+ break;
+ return ipv4_get_dsfield(ip_hdr(skb));
+
+ case cpu_to_be16(ETH_P_IPV6):
+ if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
+ break;
+ return ipv6_get_dsfield(ipv6_hdr(skb));
+ }
+
+ return -1;
+}
+
static inline int INET_ECN_set_ect1(struct sk_buff *skb)
{
switch (skb_protocol(skb, true)) {
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 89163ef8cf4b..9e1111f5915b 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -207,11 +207,10 @@ struct inet_sock {
__be32 inet_saddr;
__s16 uc_ttl;
__u16 cmsg_flags;
+ struct ip_options_rcu __rcu *inet_opt;
__be16 inet_sport;
__u16 inet_id;
- struct ip_options_rcu __rcu *inet_opt;
- int rx_dst_ifindex;
__u8 tos;
__u8 min_ttl;
__u8 mc_ttl;
diff --git a/include/net/ioam6.h b/include/net/ioam6.h
index 3c2993bc48c8..3f45ba37a2c6 100644
--- a/include/net/ioam6.h
+++ b/include/net/ioam6.h
@@ -56,7 +56,8 @@ static inline struct ioam6_pernet_data *ioam6_pernet(struct net *net)
struct ioam6_namespace *ioam6_namespace(struct net *net, __be16 id);
void ioam6_fill_trace_data(struct sk_buff *skb,
struct ioam6_namespace *ns,
- struct ioam6_trace_hdr *trace);
+ struct ioam6_trace_hdr *trace,
+ bool is_input);
int ioam6_init(void);
void ioam6_exit(void);
diff --git a/include/net/ip.h b/include/net/ip.h
index 9192444f2964..b71e88507c4a 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -24,6 +24,7 @@
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/sockptr.h>
+#include <linux/static_key.h>
#include <net/inet_sock.h>
#include <net/route.h>
@@ -291,7 +292,11 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
#define NET_ADD_STATS(net, field, adnd) SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
-u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
+static inline u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt)
+{
+ return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt);
+}
+
unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG==32
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
@@ -746,6 +751,7 @@ void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
struct ipcm_cookie *ipc, bool allow_ipv6);
+DECLARE_STATIC_KEY_FALSE(ip4_min_ttl);
int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
unsigned int optlen);
int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
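
With snmp_get_cpu_field() inlined, the common fold loop reduces to plain per-CPU loads; the typical caller pattern (mib and offt are illustrative):

    u64 sum = 0;
    int cpu;

    for_each_possible_cpu(cpu)
        sum += snmp_get_cpu_field(mib, cpu, offt);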
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 7cb5a1aace40..ff1804a0c469 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -931,6 +931,7 @@ struct netns_ipvs {
int sysctl_conn_reuse_mode;
int sysctl_schedule_icmp;
int sysctl_ignore_tunneled;
+ int sysctl_run_estimation;
/* ip_vs_lblc */
int sysctl_lblc_expiration;
@@ -1071,6 +1072,11 @@ static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
return ipvs->sysctl_cache_bypass;
}
+static inline int sysctl_run_estimation(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_run_estimation;
+}
+
#else
static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
@@ -1163,6 +1169,11 @@ static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
return 0;
}
+static inline int sysctl_run_estimation(struct netns_ipvs *ipvs)
+{
+ return 1;
+}
+
#endif
/* IPVS core functions
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index f2d0ecc257bb..c19bf51ded1d 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -1092,6 +1092,7 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
/*
* socket options (ipv6_sockglue.c)
*/
+DECLARE_STATIC_KEY_FALSE(ip6_min_hopcount);
int ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
unsigned int optlen);
diff --git a/include/net/llc.h b/include/net/llc.h
index df282d9b4017..e250dca03963 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -72,7 +72,9 @@ struct llc_sap {
static inline
struct hlist_head *llc_sk_dev_hash(struct llc_sap *sap, int ifindex)
{
- return &sap->sk_dev_hash[ifindex % LLC_SK_DEV_HASH_ENTRIES];
+ u32 bucket = hash_32(ifindex, LLC_SK_DEV_HASH_BITS);
+
+ return &sap->sk_dev_hash[bucket];
}
static inline
@@ -133,7 +135,7 @@ static inline void llc_sap_put(struct llc_sap *sap)
struct llc_sap *llc_sap_find(unsigned char sap_value);
int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
- unsigned char *dmac, unsigned char dsap);
+ const unsigned char *dmac, unsigned char dsap);
void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb);
void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb);
diff --git a/include/net/llc_if.h b/include/net/llc_if.h
index 8d5c543cd620..c72570a21a4f 100644
--- a/include/net/llc_if.h
+++ b/include/net/llc_if.h
@@ -62,7 +62,8 @@
#define LLC_STATUS_CONFLICT 7 /* disconnect conn */
#define LLC_STATUS_RESET_DONE 8 /* */
-int llc_establish_connection(struct sock *sk, u8 *lmac, u8 *dmac, u8 dsap);
+int llc_establish_connection(struct sock *sk, const u8 *lmac, u8 *dmac,
+ u8 dsap);
int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb);
int llc_send_disc(struct sock *sk);
#endif /* LLC_IF_H */
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 618d1f427cb2..dd757f0987b0 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -632,6 +632,10 @@ struct ieee80211_fils_discovery {
* @s1g: BSS is S1G BSS (affects Association Request format).
* @beacon_tx_rate: The configured beacon transmit rate that needs to be passed
* to driver when rate control is offloaded to firmware.
+ * @power_type: power type of BSS for 6 GHz
+ * @tx_pwr_env: transmit power envelope array of BSS.
+ * @tx_pwr_env_num: number of @tx_pwr_env.
+ * @pwr_reduction: power constraint of BSS.
*/
struct ieee80211_bss_conf {
const u8 *bssid;
@@ -702,6 +706,10 @@ struct ieee80211_bss_conf {
u32 unsol_bcast_probe_resp_interval;
bool s1g;
struct cfg80211_bitrate_mask beacon_tx_rate;
+ enum ieee80211_ap_reg_power power_type;
+ struct ieee80211_tx_pwr_env tx_pwr_env[IEEE80211_TPE_MAX_IE_COUNT];
+ u8 tx_pwr_env_num;
+ u8 pwr_reduction;
};
/**
@@ -1715,6 +1723,7 @@ enum ieee80211_offload_flags {
* write-protected by sdata_lock and local->mtx so holding either is fine
* for read access.
* @color_change_color: the bss color that will be used after the change.
+ * @mbssid_tx_vif: Pointer to the transmitting interface if MBSSID is enabled.
*/
struct ieee80211_vif {
enum nl80211_iftype type;
@@ -1746,6 +1755,8 @@ struct ieee80211_vif {
bool color_change_active;
u8 color_change_color;
+ struct ieee80211_vif *mbssid_tx_vif;
+
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
};
diff --git a/include/net/mctp.h b/include/net/mctp.h
index a824d47c3c6d..7e35ec79b909 100644
--- a/include/net/mctp.h
+++ b/include/net/mctp.h
@@ -11,6 +11,7 @@
#include <linux/bits.h>
#include <linux/mctp.h>
+#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
@@ -54,43 +55,57 @@ struct mctp_sock {
struct sock sk;
/* bind() params */
- int bind_net;
+ unsigned int bind_net;
mctp_eid_t bind_addr;
__u8 bind_type;
+ /* sendmsg()/recvmsg() uses struct sockaddr_mctp_ext */
+ bool addr_ext;
+
/* list of mctp_sk_key, for incoming tag lookup. updates protected
* by sk->net->keys_lock
*/
struct hlist_head keys;
+
+ /* mechanism for expiring allocated keys; will release an allocated
+ * tag, and any netdev state for a request/response pairing
+ */
+ struct timer_list key_expiry;
};
/* Key for matching incoming packets to sockets or reassembly contexts.
* Packets are matched on (src,dest,tag).
*
- * Lifetime requirements:
+ * Lifetime / locking requirements:
+ *
+ * - individual key data (ie, the struct itself) is protected by key->lock;
+ * changes must be made with that lock held.
+ *
+ * - the lookup fields: peer_addr, local_addr and tag are set before the
+ * key is added to lookup lists, and never updated.
*
- * - keys are free()ed via RCU
+ * - A ref to the key must be held (through key->refs) if a pointer to the
+ * key is to be accessed after key->lock is released.
*
* - a mctp_sk_key contains a reference to a struct sock; this is valid
* for the life of the key. On sock destruction (through unhash), the key is
- * removed from lists (see below), and will not be observable after a RCU
- * grace period.
- *
- * any RX occurring within that grace period may still queue to the socket,
- * but will hit the SOCK_DEAD case before the socket is freed.
+ * removed from lists (see below), and marked invalid.
*
* - these mctp_sk_keys appear on two lists:
* 1) the struct mctp_sock->keys list
* 2) the struct netns_mctp->keys list
*
- * updates to either list are performed under the netns_mctp->keys
- * lock.
+ * presence on these lists requires a (single) refcount to be held; both
+ * lists are updated as a single operation.
+ *
+ * Updates and lookups in either list are performed under the
+ * netns_mctp->keys lock. Lookup functions will need to lock the key and
+ * take a reference before unlocking the keys_lock. Consequently, the list's
+ * keys_lock *cannot* be acquired with the individual key->lock held.
*
* - a key may have a sk_buff attached as part of an in-progress message
- * reassembly (->reasm_head). The reassembly context is protected by
- * reasm_lock, which may be acquired with the keys lock (above) held, if
- * necessary. Consequently, keys lock *cannot* be acquired with the
- * reasm_lock held.
+ * reassembly (->reasm_head). The reasm data is protected by the individual
+ * key->lock.
*
* - there are two destruction paths for a mctp_sk_key:
*
@@ -101,6 +116,8 @@ struct mctp_sock {
* the (complete) reply, or during reassembly errors. Here, we clean up
* the reassembly context (marking reasm_dead, to prevent another from
* starting), and remove the socket from the netns & socket lists.
+ *
+ * - through an expiry timeout, on a per-socket timer
*/
struct mctp_sk_key {
mctp_eid_t peer_addr;
@@ -116,20 +133,40 @@ struct mctp_sk_key {
/* per-socket list */
struct hlist_node sklist;
+ /* lock protects against concurrent updates to the reassembly and
+ * expiry data below.
+ */
+ spinlock_t lock;
+
+ /* Keys are referenced during the output path, which may sleep */
+ refcount_t refs;
+
/* incoming fragment reassembly context */
- spinlock_t reasm_lock;
struct sk_buff *reasm_head;
struct sk_buff **reasm_tailp;
bool reasm_dead;
u8 last_seq;
- struct rcu_head rcu;
+ /* key validity */
+ bool valid;
+
+ /* expiry timeout; valid (above) cleared on expiry */
+ unsigned long expiry;
+
+ /* free to use for device flow state tracking. Initialised to
+ * zero on initial key creation
+ */
+ unsigned long dev_flow_state;
+ struct mctp_dev *dev;
};
struct mctp_skb_cb {
unsigned int magic;
unsigned int net;
+ int ifindex; /* extended/direct addressing if set */
mctp_eid_t src;
+ unsigned char halen;
+ unsigned char haddr[MAX_ADDR_LEN];
};
/* skb control-block accessors with a little extra debugging for initial
@@ -153,10 +190,18 @@ static inline struct mctp_skb_cb *mctp_cb(struct sk_buff *skb)
{
struct mctp_skb_cb *cb = (void *)skb->cb;
+ BUILD_BUG_ON(sizeof(struct mctp_skb_cb) > sizeof(skb->cb));
WARN_ON(cb->magic != 0x4d435450);
return (void *)(skb->cb);
}
+/* If CONFIG_MCTP_FLOWS, we may add one of these as a SKB extension,
+ * indicating the flow to the device driver.
+ */
+struct mctp_flow {
+ struct mctp_sk_key *key;
+};
+
/* Route definition.
*
* These are held in the pernet->mctp.routes list, with RCU protection for
@@ -165,8 +210,7 @@ static inline struct mctp_skb_cb *mctp_cb(struct sk_buff *skb)
*
* Updates to the route table are performed under rtnl; all reads under RCU,
* so routes cannot be referenced over a RCU grace period. Specifically: A
- * caller cannot block between mctp_route_lookup and passing the route to
- * mctp_do_route.
+ * caller cannot block between mctp_route_lookup and mctp_route_release()
*/
struct mctp_route {
mctp_eid_t min, max;
@@ -186,11 +230,11 @@ struct mctp_route {
struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
mctp_eid_t daddr);
-int mctp_do_route(struct mctp_route *rt, struct sk_buff *skb);
-
int mctp_local_output(struct sock *sk, struct mctp_route *rt,
struct sk_buff *skb, mctp_eid_t daddr, u8 req_tag);
+void mctp_key_unref(struct mctp_sk_key *key);
+
/* routing <--> device interface */
unsigned int mctp_default_net(struct net *net);
int mctp_default_net_set(struct net *net, unsigned int index);
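The locking comment above implies a fixed lookup discipline: walk the list under keys_lock, lock the key, take a reference, then drop both locks before using the key. A sketch of that pattern, assuming a hypothetical mctp_lookup_key() list walk:

	struct mctp_sk_key *key;
	unsigned long flags;

	spin_lock_irqsave(&net->mctp.keys_lock, flags);
	key = mctp_lookup_key(net, skb);	/* hypothetical list walk */
	if (key) {
		spin_lock(&key->lock);
		refcount_inc(&key->refs);	/* pin before dropping keys_lock */
		spin_unlock(&key->lock);
	}
	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);

	/* ... use key; it stays pinned even if it expires meanwhile ... */
	if (key)
		mctp_key_unref(key);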
diff --git a/include/net/mctpdevice.h b/include/net/mctpdevice.h
index 71a11012fac7..5c0d04b5c12c 100644
--- a/include/net/mctpdevice.h
+++ b/include/net/mctpdevice.h
@@ -14,11 +14,17 @@
#include <linux/types.h>
#include <linux/refcount.h>
+struct mctp_sk_key;
+
struct mctp_dev {
struct net_device *dev;
+ refcount_t refs;
+
unsigned int net;
+ const struct mctp_netdev_ops *ops;
+
/* Only modified under RTNL. Reads have addrs_lock held */
u8 *addrs;
size_t num_addrs;
@@ -27,9 +33,24 @@ struct mctp_dev {
struct rcu_head rcu;
};
+struct mctp_netdev_ops {
+ void (*release_flow)(struct mctp_dev *dev,
+ struct mctp_sk_key *key);
+};
+
#define MCTP_INITIAL_DEFAULT_NET 1
struct mctp_dev *mctp_dev_get_rtnl(const struct net_device *dev);
struct mctp_dev *__mctp_dev_get(const struct net_device *dev);
+int mctp_register_netdev(struct net_device *dev,
+ const struct mctp_netdev_ops *ops);
+void mctp_unregister_netdev(struct net_device *dev);
+
+void mctp_dev_hold(struct mctp_dev *mdev);
+void mctp_dev_put(struct mctp_dev *mdev);
+
+void mctp_dev_set_key(struct mctp_dev *dev, struct mctp_sk_key *key);
+void mctp_dev_release_key(struct mctp_dev *dev, struct mctp_sk_key *key);
+
#endif /* __NET_MCTPDEVICE_H */
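A device driver opts into flow tracking by registering the new ops; release_flow() is where any state stashed in key->dev_flow_state gets torn down. A hedged sketch with hypothetical my_* names:

static void my_mctp_release_flow(struct mctp_dev *mdev,
				 struct mctp_sk_key *key)
{
	/* free whatever the driver stored in key->dev_flow_state */
}

static const struct mctp_netdev_ops my_mctp_ops = {
	.release_flow	= my_mctp_release_flow,
};

static int my_probe(struct net_device *ndev)
{
	return mctp_register_netdev(ndev, &my_mctp_ops);
}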
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
index 6026bbefbffd..a925349b4b89 100644
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@ -12,6 +12,8 @@
#include <linux/tcp.h>
#include <linux/types.h>
+struct mptcp_info;
+struct mptcp_sock;
struct seq_file;
/* MPTCP sk_buff extension data */
@@ -69,6 +71,10 @@ struct mptcp_out_options {
struct {
u64 sndr_key;
u64 rcvr_key;
+ u64 data_seq;
+ u32 subflow_seq;
+ u16 data_len;
+ __sum16 csum;
};
struct {
struct mptcp_addr_info addr;
@@ -121,6 +127,8 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb);
void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
struct mptcp_out_options *opts);
+void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info);
+
/* move the skb extension owership, with the assumption that 'to' is
* newly allocated
*/
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 38e4094960ce..04341d86585d 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -137,7 +137,7 @@ struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
u8 *opt, int opt_len,
struct ndisc_options *ndopts);
-void __ndisc_fill_addr_option(struct sk_buff *skb, int type, void *data,
+void __ndisc_fill_addr_option(struct sk_buff *skb, int type, const void *data,
int data_len, int pad);
#define NDISC_OPS_REDIRECT_DATA_SPACE 2
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 22ced1381ede..38a0c1d24570 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -144,17 +144,18 @@ struct neighbour {
struct timer_list timer;
unsigned long used;
atomic_t probes;
- __u8 flags;
- __u8 nud_state;
- __u8 type;
- __u8 dead;
+ u8 nud_state;
+ u8 type;
+ u8 dead;
u8 protocol;
+ u32 flags;
seqlock_t ha_lock;
unsigned char ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))] __aligned(8);
struct hh_cache hh;
int (*output)(struct neighbour *, struct sk_buff *);
const struct neigh_ops *ops;
struct list_head gc_list;
+ struct list_head managed_list;
struct rcu_head rcu;
struct net_device *dev;
u8 primary_key[0];
@@ -172,7 +173,7 @@ struct pneigh_entry {
struct pneigh_entry *next;
possible_net_t net;
struct net_device *dev;
- u8 flags;
+ u32 flags;
u8 protocol;
u8 key[];
};
@@ -216,11 +217,13 @@ struct neigh_table {
int gc_thresh3;
unsigned long last_flush;
struct delayed_work gc_work;
+ struct delayed_work managed_work;
struct timer_list proxy_timer;
struct sk_buff_head proxy_queue;
atomic_t entries;
atomic_t gc_entries;
struct list_head gc_list;
+ struct list_head managed_list;
rwlock_t lock;
unsigned long last_rand;
struct neigh_statistics __percpu *stats;
@@ -250,12 +253,21 @@ static inline void *neighbour_priv(const struct neighbour *n)
}
/* flags for neigh_update() */
-#define NEIGH_UPDATE_F_OVERRIDE 0x00000001
-#define NEIGH_UPDATE_F_WEAK_OVERRIDE 0x00000002
-#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER 0x00000004
-#define NEIGH_UPDATE_F_EXT_LEARNED 0x20000000
-#define NEIGH_UPDATE_F_ISROUTER 0x40000000
-#define NEIGH_UPDATE_F_ADMIN 0x80000000
+#define NEIGH_UPDATE_F_OVERRIDE BIT(0)
+#define NEIGH_UPDATE_F_WEAK_OVERRIDE BIT(1)
+#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER BIT(2)
+#define NEIGH_UPDATE_F_USE BIT(3)
+#define NEIGH_UPDATE_F_MANAGED BIT(4)
+#define NEIGH_UPDATE_F_EXT_LEARNED BIT(5)
+#define NEIGH_UPDATE_F_ISROUTER BIT(6)
+#define NEIGH_UPDATE_F_ADMIN BIT(7)
+
+/* In-kernel representation for NDA_FLAGS_EXT flags: */
+#define NTF_OLD_MASK 0xff
+#define NTF_EXT_SHIFT 8
+#define NTF_EXT_MASK (NTF_EXT_MANAGED)
+
+#define NTF_MANAGED (NTF_EXT_MANAGED << NTF_EXT_SHIFT)
extern const struct nla_policy nda_policy[];
@@ -504,10 +516,15 @@ static inline int neigh_output(struct neighbour *n, struct sk_buff *skb,
{
const struct hh_cache *hh = &n->hh;
- if ((n->nud_state & NUD_CONNECTED) && hh->hh_len && !skip_cache)
+ /* n->nud_state and hh->hh_len could be changed under us.
+ * neigh_hh_output() is taking care of the race later.
+ */
+ if (!skip_cache &&
+ (READ_ONCE(n->nud_state) & NUD_CONNECTED) &&
+ READ_ONCE(hh->hh_len))
return neigh_hh_output(hh, skb);
- else
- return n->output(n, skb);
+
+ return n->output(n, skb);
}
static inline struct neighbour *
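With flags widened to u32, the NDA_FLAGS_EXT attribute folds into the same field by shifting past the legacy 8-bit space. A small sketch of the composition, using only the masks above (values illustrative):

	u8  ndm_flags = NTF_EXT_LEARNED;	/* legacy struct ndmsg flags */
	u32 ext_flags = NTF_EXT_MANAGED;	/* from the NDA_FLAGS_EXT attribute */
	u32 flags = ndm_flags | (ext_flags << NTF_EXT_SHIFT);

	/* (flags & NTF_MANAGED) is now set; (flags & NTF_OLD_MASK) recovers
	 * the legacy bits for netlink dumps.
	 */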
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index a16171c5fd9e..a0d9e0b47ab8 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -21,13 +21,19 @@ struct module;
#define NFT_JUMP_STACK_SIZE 16
+enum {
+ NFT_PKTINFO_L4PROTO = (1 << 0),
+ NFT_PKTINFO_INNER = (1 << 1),
+};
+
struct nft_pktinfo {
struct sk_buff *skb;
const struct nf_hook_state *state;
- bool tprot_set;
+ u8 flags;
u8 tprot;
u16 fragoff;
unsigned int thoff;
+ unsigned int inneroff;
};
static inline struct sock *nft_sk(const struct nft_pktinfo *pkt)
@@ -75,7 +81,7 @@ static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt)
{
- pkt->tprot_set = false;
+ pkt->flags = 0;
pkt->tprot = 0;
pkt->thoff = 0;
pkt->fragoff = 0;
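Call sites that used to test pkt->tprot_set now test the flag bit, leaving room for the inner-header state alongside it:

	if (pkt->flags & NFT_PKTINFO_L4PROTO) {
		/* pkt->tprot and pkt->thoff are valid */
	}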
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index eb4c094cd54d..c4a6147b0ef8 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -10,7 +10,7 @@ static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt)
struct iphdr *ip;
ip = ip_hdr(pkt->skb);
- pkt->tprot_set = true;
+ pkt->flags = NFT_PKTINFO_L4PROTO;
pkt->tprot = ip->protocol;
pkt->thoff = ip_hdrlen(pkt->skb);
pkt->fragoff = ntohs(ip->frag_off) & IP_OFFSET;
@@ -36,7 +36,7 @@ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
else if (len < thoff)
return -1;
- pkt->tprot_set = true;
+ pkt->flags = NFT_PKTINFO_L4PROTO;
pkt->tprot = iph->protocol;
pkt->thoff = thoff;
pkt->fragoff = ntohs(iph->frag_off) & IP_OFFSET;
@@ -71,7 +71,7 @@ static inline int nft_set_pktinfo_ipv4_ingress(struct nft_pktinfo *pkt)
goto inhdr_error;
}
- pkt->tprot_set = true;
+ pkt->flags = NFT_PKTINFO_L4PROTO;
pkt->tprot = iph->protocol;
pkt->thoff = thoff;
pkt->fragoff = ntohs(iph->frag_off) & IP_OFFSET;
@@ -82,4 +82,5 @@ inhdr_error:
__IP_INC_STATS(nft_net(pkt), IPSTATS_MIB_INHDRERRORS);
return -1;
}
+
#endif
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index 7595e02b00ba..ec7eaeaf4f04 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -18,7 +18,7 @@ static inline void nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt)
return;
}
- pkt->tprot_set = true;
+ pkt->flags = NFT_PKTINFO_L4PROTO;
pkt->tprot = protohdr;
pkt->thoff = thoff;
pkt->fragoff = frag_off;
@@ -50,7 +50,7 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt)
if (protohdr < 0)
return -1;
- pkt->tprot_set = true;
+ pkt->flags = NFT_PKTINFO_L4PROTO;
pkt->tprot = protohdr;
pkt->thoff = thoff;
pkt->fragoff = frag_off;
@@ -96,7 +96,7 @@ static inline int nft_set_pktinfo_ipv6_ingress(struct nft_pktinfo *pkt)
if (protohdr < 0)
goto inhdr_error;
- pkt->tprot_set = true;
+ pkt->flags = NFT_PKTINFO_L4PROTO;
pkt->tprot = protohdr;
pkt->thoff = thoff;
pkt->fragoff = frag_off;
diff --git a/include/net/netfilter/xt_rateest.h b/include/net/netfilter/xt_rateest.h
index 832ab69efda5..4c3809e141f4 100644
--- a/include/net/netfilter/xt_rateest.h
+++ b/include/net/netfilter/xt_rateest.h
@@ -6,7 +6,7 @@
struct xt_rateest {
/* keep lock and bstats on same cache line to speedup xt_rateest_tg() */
- struct gnet_stats_basic_packed bstats;
+ struct gnet_stats_basic_sync bstats;
spinlock_t lock;
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index a4082406a003..3855f069627f 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -216,24 +216,14 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
page_pool_put_full_page(pool, page, true);
}
-#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT \
- (sizeof(dma_addr_t) > sizeof(unsigned long))
-
static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
{
- dma_addr_t ret = page->dma_addr;
-
- if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
- ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;
-
- return ret;
+ return page->dma_addr;
}
static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
{
page->dma_addr = addr;
- if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
- page->dma_addr_upper = upper_32_bits(addr);
}
static inline void page_pool_set_frag_count(struct page *page, long nr)
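The dropped upper-half handling means page->dma_addr is stored and read as a single value; configurations where dma_addr_t is wider than unsigned long are presumably rejected at pool-creation time instead (not shown here). The driver-visible pairing is unchanged; a sketch:

	/* Map once, remember the address via the pool accessors. */
	dma_addr_t dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
					    DMA_FROM_DEVICE,
					    DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;
	page_pool_set_dma_addr(page, dma);

	/* later, e.g. on unmap: */
	dma = page_pool_get_dma_addr(page);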
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 83a6d0792180..193f88ebf629 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -765,7 +765,7 @@ struct tc_cookie {
};
struct tc_qopt_offload_stats {
- struct gnet_stats_basic_packed *bstats;
+ struct gnet_stats_basic_sync *bstats;
struct gnet_stats_queue *qstats;
};
@@ -885,7 +885,7 @@ struct tc_gred_qopt_offload_params {
};
struct tc_gred_qopt_offload_stats {
- struct gnet_stats_basic_packed bstats[MAX_DPs];
+ struct gnet_stats_basic_sync bstats[MAX_DPs];
struct gnet_stats_queue qstats[MAX_DPs];
struct red_stats *xstats[MAX_DPs];
};
@@ -977,6 +977,7 @@ enum tc_tbf_command {
TC_TBF_REPLACE,
TC_TBF_DESTROY,
TC_TBF_STATS,
+ TC_TBF_GRAFT,
};
struct tc_tbf_qopt_offload_replace_params {
@@ -992,6 +993,7 @@ struct tc_tbf_qopt_offload {
union {
struct tc_tbf_qopt_offload_replace_params replace_params;
struct tc_qopt_offload_stats stats;
+ u32 child_handle;
};
};
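TC_TBF_GRAFT tells an offloading driver which child qdisc now sits under TBF, with child_handle carrying the new child's handle. A hedged sketch of the driver dispatch; the my_* helpers are hypothetical:

static int my_setup_tc_tbf(struct my_port *port,
			   struct tc_tbf_qopt_offload *qopt)
{
	switch (qopt->command) {
	case TC_TBF_REPLACE:
		return my_tbf_replace(port, &qopt->replace_params);
	case TC_TBF_DESTROY:
		return my_tbf_destroy(port);
	case TC_TBF_STATS:
		return my_tbf_stats(port, &qopt->stats);
	case TC_TBF_GRAFT:
		port->tbf_child = qopt->child_handle;	/* remember new child */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}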
diff --git a/include/net/rose.h b/include/net/rose.h
index cf517d306a28..0f0a4ce0fee7 100644
--- a/include/net/rose.h
+++ b/include/net/rose.h
@@ -162,8 +162,8 @@ extern int sysctl_rose_link_fail_timeout;
extern int sysctl_rose_maximum_vcs;
extern int sysctl_rose_window_size;
-int rosecmp(rose_address *, rose_address *);
-int rosecmpm(rose_address *, rose_address *, unsigned short);
+int rosecmp(const rose_address *, const rose_address *);
+int rosecmpm(const rose_address *, const rose_address *, unsigned short);
char *rose2asc(char *buf, const rose_address *);
struct sock *rose_find_socket(unsigned int, struct rose_neigh *);
void rose_kill_by_neigh(struct rose_neigh *);
@@ -205,8 +205,8 @@ extern const struct seq_operations rose_node_seqops;
extern struct seq_operations rose_route_seqops;
void rose_add_loopback_neigh(void);
-int __must_check rose_add_loopback_node(rose_address *);
-void rose_del_loopback_node(rose_address *);
+int __must_check rose_add_loopback_node(const rose_address *);
+void rose_del_loopback_node(const rose_address *);
void rose_rt_device_down(struct net_device *);
void rose_link_device_down(struct net_device *);
struct net_device *rose_dev_first(void);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index c0069ac00e62..22179b2fda72 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -40,6 +40,13 @@ enum qdisc_state_t {
__QDISC_STATE_DRAINING,
};
+enum qdisc_state2_t {
+ /* Only for !TCQ_F_NOLOCK qdisc. Never access it directly.
+ * Use qdisc_run_begin/end() or qdisc_is_running() instead.
+ */
+ __QDISC_STATE2_RUNNING,
+};
+
#define QDISC_STATE_MISSED BIT(__QDISC_STATE_MISSED)
#define QDISC_STATE_DRAINING BIT(__QDISC_STATE_DRAINING)
@@ -97,7 +104,7 @@ struct Qdisc {
struct netdev_queue *dev_queue;
struct net_rate_estimator __rcu *rate_est;
- struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+ struct gnet_stats_basic_sync __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
int pad;
refcount_t refcnt;
@@ -107,10 +114,10 @@ struct Qdisc {
*/
struct sk_buff_head gso_skb ____cacheline_aligned_in_smp;
struct qdisc_skb_head q;
- struct gnet_stats_basic_packed bstats;
- seqcount_t running;
+ struct gnet_stats_basic_sync bstats;
struct gnet_stats_queue qstats;
unsigned long state;
+ unsigned long state2; /* must be written under qdisc spinlock */
struct Qdisc *next_sched;
struct sk_buff_head skb_bad_txq;
@@ -143,11 +150,15 @@ static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
return NULL;
}
+/* For !TCQ_F_NOLOCK qdisc: callers must either call this within a qdisc
+ * root_lock section, or provide their own memory barriers -- ordering
+ * against qdisc_run_begin/end() atomic bit operations.
+ */
static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
if (qdisc->flags & TCQ_F_NOLOCK)
return spin_is_locked(&qdisc->seqlock);
- return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
+ return test_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}
static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc)
@@ -167,6 +178,9 @@ static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
return !READ_ONCE(qdisc->q.qlen);
}
+/* For !TCQ_F_NOLOCK qdisc, qdisc_run_begin/end() must be invoked with
+ * the qdisc root lock acquired.
+ */
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
if (qdisc->flags & TCQ_F_NOLOCK) {
@@ -206,15 +220,8 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
* after it releases the lock at the end of qdisc_run_end().
*/
return spin_trylock(&qdisc->seqlock);
- } else if (qdisc_is_running(qdisc)) {
- return false;
}
- /* Variant of write_seqcount_begin() telling lockdep a trylock
- * was attempted.
- */
- raw_write_seqcount_begin(&qdisc->running);
- seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
- return true;
+ return !__test_and_set_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}
static inline void qdisc_run_end(struct Qdisc *qdisc)
@@ -226,7 +233,7 @@ static inline void qdisc_run_end(struct Qdisc *qdisc)
&qdisc->state)))
__netif_schedule(qdisc);
} else {
- write_seqcount_end(&qdisc->running);
+ __clear_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}
}
@@ -308,6 +315,8 @@ struct Qdisc_ops {
struct netlink_ext_ack *extack);
void (*attach)(struct Qdisc *sch);
int (*change_tx_queue_len)(struct Qdisc *, unsigned int);
+ void (*change_real_num_tx)(struct Qdisc *sch,
+ unsigned int new_real_tx);
int (*dump)(struct Qdisc *, struct sk_buff *);
int (*dump_stats)(struct Qdisc *, struct gnet_dump *);
@@ -590,14 +599,6 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
return qdisc_lock(root);
}
-static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
-{
- struct Qdisc *root = qdisc_root_sleeping(qdisc);
-
- ASSERT_RTNL();
- return &root->running;
-}
-
static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
return qdisc->dev_queue->dev;
@@ -684,6 +685,8 @@ void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
int dev_qdisc_change_tx_queue_len(struct net_device *dev);
+void dev_qdisc_change_real_num_tx(struct net_device *dev,
+ unsigned int new_real_tx);
void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
@@ -845,14 +848,16 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return sch->enqueue(skb, sch, to_free);
}
-static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
+static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
__u64 bytes, __u32 packets)
{
- bstats->bytes += bytes;
- bstats->packets += packets;
+ u64_stats_update_begin(&bstats->syncp);
+ u64_stats_add(&bstats->bytes, bytes);
+ u64_stats_add(&bstats->packets, packets);
+ u64_stats_update_end(&bstats->syncp);
}
-static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
+static inline void bstats_update(struct gnet_stats_basic_sync *bstats,
const struct sk_buff *skb)
{
_bstats_update(bstats,
@@ -860,26 +865,10 @@ static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}
-static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
- __u64 bytes, __u32 packets)
-{
- u64_stats_update_begin(&bstats->syncp);
- _bstats_update(&bstats->bstats, bytes, packets);
- u64_stats_update_end(&bstats->syncp);
-}
-
-static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
- const struct sk_buff *skb)
-{
- u64_stats_update_begin(&bstats->syncp);
- bstats_update(&bstats->bstats, skb);
- u64_stats_update_end(&bstats->syncp);
-}
-
static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
const struct sk_buff *skb)
{
- bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
+ bstats_update(this_cpu_ptr(sch->cpu_bstats), skb);
}
static inline void qdisc_bstats_update(struct Qdisc *sch,
@@ -968,10 +957,9 @@ static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
__u32 *backlog)
{
struct gnet_stats_queue qstats = { 0 };
- __u32 len = qdisc_qlen_sum(sch);
- __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
- *qlen = qstats.qlen;
+ gnet_stats_add_queue(&qstats, sch->cpu_qstats, &sch->qstats);
+ *qlen = qstats.qlen + qdisc_qlen(sch);
*backlog = qstats.backlog;
}
@@ -1312,15 +1300,15 @@ void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64);
struct mini_Qdisc {
struct tcf_proto *filter_list;
struct tcf_block *block;
- struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+ struct gnet_stats_basic_sync __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
- struct rcu_head rcu;
+ unsigned long rcu_state;
};
static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
const struct sk_buff *skb)
{
- bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb);
+ bstats_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}
static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
@@ -1341,6 +1329,8 @@ void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
struct tcf_block *block);
+void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx);
+
int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));
#endif
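On the read side, gnet_stats_basic_sync snapshots use the standard u64_stats retry loop; a minimal reader sketch:

static void bstats_read(const struct gnet_stats_basic_sync *b,
			u64 *bytes, u64 *packets)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&b->syncp);
		*bytes = u64_stats_read(&b->bytes);
		*packets = u64_stats_read(&b->packets);
	} while (u64_stats_fetch_retry(&b->syncp, start));
}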
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 69bab88ad66b..189fdb9db162 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -626,7 +626,8 @@ static inline __u32 sctp_min_frag_point(struct sctp_sock *sp, __u16 datasize)
static inline int sctp_transport_pl_hlen(struct sctp_transport *t)
{
- return __sctp_mtu_payload(sctp_sk(t->asoc->base.sk), t, 0, 0);
+ return __sctp_mtu_payload(sctp_sk(t->asoc->base.sk), t, 0, 0) -
+ sizeof(struct sctphdr);
}
static inline void sctp_transport_pl_reset(struct sctp_transport *t)
@@ -653,12 +654,10 @@ static inline void sctp_transport_pl_update(struct sctp_transport *t)
if (t->pl.state == SCTP_PL_DISABLED)
return;
- if (del_timer(&t->probe_timer))
- sctp_transport_put(t);
-
t->pl.state = SCTP_PL_BASE;
t->pl.pmtu = SCTP_BASE_PLPMTU;
t->pl.probe_size = SCTP_BASE_PLPMTU;
+ sctp_transport_reset_probe_timer(t);
}
static inline bool sctp_transport_pl_enabled(struct sctp_transport *t)
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 2eb6d7c2c931..f37c7a558d6d 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -384,11 +384,11 @@ sctp_vtag_verify(const struct sctp_chunk *chunk,
* Verification Tag value does not match the receiver's own
* tag value, the receiver shall silently discard the packet...
*/
- if (ntohl(chunk->sctp_hdr->vtag) == asoc->c.my_vtag)
- return 1;
+ if (ntohl(chunk->sctp_hdr->vtag) != asoc->c.my_vtag)
+ return 0;
chunk->transport->encap_port = SCTP_INPUT_CB(chunk->skb)->encap_port;
- return 0;
+ return 1;
}
/* Check VTAG of the packet matches the sender's own tag and the T bit is
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 651bba654d77..899c29c326ba 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1355,16 +1355,6 @@ struct sctp_endpoint {
reconf_enable:1;
__u8 strreset_enable;
-
- /* Security identifiers from incoming (INIT). These are set by
- * security_sctp_assoc_request(). These will only be used by
- * SCTP TCP type sockets and peeled off connections as they
- * cause a new socket to be generated. security_sctp_sk_clone()
- * will then plug these into the new socket.
- */
-
- u32 secid;
- u32 peer_secid;
};
/* Recover the outer endpoint structure. */
@@ -2104,6 +2094,16 @@ struct sctp_association {
__u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1];
__u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1];
+ /* Security identifiers from incoming (INIT). These are set by
+ * security_sctp_assoc_request(). These will only be used by
+ * SCTP TCP type sockets and peeled off connections as they
+ * cause a new socket to be generated. security_sctp_sk_clone()
+ * will then plug these into the new socket.
+ */
+
+ u32 secid;
+ u32 peer_secid;
+
struct rcu_head rcu;
};
diff --git a/include/net/sock.h b/include/net/sock.h
index ea6fbc88c8f9..b32906e1ab55 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -259,10 +259,11 @@ struct bpf_local_storage;
* @sk_rcvbuf: size of receive buffer in bytes
* @sk_wq: sock wait queue and async head
* @sk_rx_dst: receive input route used by early demux
+ * @sk_rx_dst_ifindex: ifindex for @sk_rx_dst
+ * @sk_rx_dst_cookie: cookie for @sk_rx_dst
* @sk_dst_cache: destination cache
* @sk_dst_pending_confirm: need to confirm neighbour
* @sk_policy: flow policy
- * @sk_rx_skb_cache: cache copy of recently accessed RX skb
* @sk_receive_queue: incoming packets
* @sk_wmem_alloc: transmit queue bytes committed
* @sk_tsq_flags: TCP Small Queues flags
@@ -270,6 +271,7 @@ struct bpf_local_storage;
* @sk_omem_alloc: "o" is "option" or "other"
* @sk_wmem_queued: persistent queue size
* @sk_forward_alloc: space allocated forward
+ * @sk_reserved_mem: space reserved and non-reclaimable for the socket
* @sk_napi_id: id of the last napi context to receive data for sk
* @sk_ll_usec: usecs to busypoll when there is no data
* @sk_allocation: allocation mode
@@ -329,7 +331,6 @@ struct bpf_local_storage;
* @sk_peek_off: current peek_offset value
* @sk_send_head: front of stuff to transmit
* @tcp_rtx_queue: TCP re-transmit queue [union with @sk_send_head]
- * @sk_tx_skb_cache: cache copy of recently accessed TX skb
* @sk_security: used by security modules
* @sk_mark: generic packet mark
* @sk_cgrp_data: cgroup data for this cgroup
@@ -394,7 +395,6 @@ struct sock {
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
- struct sk_buff *sk_rx_skb_cache;
struct sk_buff_head sk_receive_queue;
/*
* The backlog queue is special, it is always used with
@@ -413,6 +413,7 @@ struct sock {
#define sk_rmem_alloc sk_backlog.rmem_alloc
int sk_forward_alloc;
+ u32 sk_reserved_mem;
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sk_ll_usec;
/* ===== mostly read cache line ===== */
@@ -431,6 +432,9 @@ struct sock {
struct xfrm_policy __rcu *sk_policy[2];
#endif
struct dst_entry *sk_rx_dst;
+ int sk_rx_dst_ifindex;
+ u32 sk_rx_dst_cookie;
+
struct dst_entry __rcu *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
@@ -443,7 +447,6 @@ struct sock {
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
};
- struct sk_buff *sk_tx_skb_cache;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
@@ -1207,13 +1210,16 @@ struct proto {
unsigned int inuse_idx;
#endif
+ int (*forward_alloc_get)(const struct sock *sk);
+
bool (*stream_memory_free)(const struct sock *sk, int wake);
- bool (*stream_memory_read)(const struct sock *sk);
+ bool (*sock_is_readable)(struct sock *sk);
/* Memory pressure */
void (*enter_memory_pressure)(struct sock *sk);
void (*leave_memory_pressure)(struct sock *sk);
atomic_long_t *memory_allocated; /* Current allocated memory. */
struct percpu_counter *sockets_allocated; /* Current number of sockets. */
+
/*
* Pressure flag: try to collapse.
* Technical note: it is used by multiple contexts non atomically.
@@ -1237,7 +1243,7 @@ struct proto {
unsigned int useroffset; /* Usercopy region offset */
unsigned int usersize; /* Usercopy region size */
- struct percpu_counter *orphan_count;
+ unsigned int __percpu *orphan_count;
struct request_sock_ops *rsk_prot;
struct timewait_sock_ops *twsk_prot;
@@ -1291,20 +1297,22 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
INDIRECT_CALLABLE_DECLARE(bool tcp_stream_memory_free(const struct sock *sk, int wake));
+static inline int sk_forward_alloc_get(const struct sock *sk)
+{
+ if (!sk->sk_prot->forward_alloc_get)
+ return sk->sk_forward_alloc;
+
+ return sk->sk_prot->forward_alloc_get(sk);
+}
+
static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
{
if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf))
return false;
-#ifdef CONFIG_INET
- return sk->sk_prot->stream_memory_free ?
- INDIRECT_CALL_1(sk->sk_prot->stream_memory_free,
- tcp_stream_memory_free,
- sk, wake) : true;
-#else
return sk->sk_prot->stream_memory_free ?
- sk->sk_prot->stream_memory_free(sk, wake) : true;
-#endif
+ INDIRECT_CALL_INET_1(sk->sk_prot->stream_memory_free,
+ tcp_stream_memory_free, sk, wake) : true;
}
static inline bool sk_stream_memory_free(const struct sock *sk)
@@ -1518,20 +1526,49 @@ sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
skb_pfmemalloc(skb);
}
+static inline int sk_unused_reserved_mem(const struct sock *sk)
+{
+ int unused_mem;
+
+ if (likely(!sk->sk_reserved_mem))
+ return 0;
+
+ unused_mem = sk->sk_reserved_mem - sk->sk_wmem_queued -
+ atomic_read(&sk->sk_rmem_alloc);
+
+ return unused_mem > 0 ? unused_mem : 0;
+}
+
static inline void sk_mem_reclaim(struct sock *sk)
{
+ int reclaimable;
+
if (!sk_has_account(sk))
return;
- if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
- __sk_mem_reclaim(sk, sk->sk_forward_alloc);
+
+ reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
+
+ if (reclaimable >= SK_MEM_QUANTUM)
+ __sk_mem_reclaim(sk, reclaimable);
+}
+
+static inline void sk_mem_reclaim_final(struct sock *sk)
+{
+ sk->sk_reserved_mem = 0;
+ sk_mem_reclaim(sk);
}
static inline void sk_mem_reclaim_partial(struct sock *sk)
{
+ int reclaimable;
+
if (!sk_has_account(sk))
return;
- if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
- __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
+
+ reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
+
+ if (reclaimable > SK_MEM_QUANTUM)
+ __sk_mem_reclaim(sk, reclaimable - 1);
}
static inline void sk_mem_charge(struct sock *sk, int size)
@@ -1541,11 +1578,19 @@ static inline void sk_mem_charge(struct sock *sk, int size)
sk->sk_forward_alloc -= size;
}
+/* the following macros control memory reclaiming in sk_mem_uncharge()
+ */
+#define SK_RECLAIM_THRESHOLD (1 << 21)
+#define SK_RECLAIM_CHUNK (1 << 20)
+
static inline void sk_mem_uncharge(struct sock *sk, int size)
{
+ int reclaimable;
+
if (!sk_has_account(sk))
return;
sk->sk_forward_alloc += size;
+ reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
/* Avoid a possible overflow.
* TCP send queues can make this happen, if sk_mem_reclaim()
@@ -1554,23 +1599,8 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
* If we reach 2 MBytes, reclaim 1 MBytes right now, there is
* no need to hold that much forward allocation anyway.
*/
- if (unlikely(sk->sk_forward_alloc >= 1 << 21))
- __sk_mem_reclaim(sk, 1 << 20);
-}
-
-DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
-static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
-{
- sk_wmem_queued_add(sk, -skb->truesize);
- sk_mem_uncharge(sk, skb->truesize);
- if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
- !sk->sk_tx_skb_cache && !skb_cloned(skb)) {
- skb_ext_reset(skb);
- skb_zcopy_clear(skb, true);
- sk->sk_tx_skb_cache = skb;
- return;
- }
- __kfree_skb(skb);
+ if (unlikely(reclaimable >= SK_RECLAIM_THRESHOLD))
+ __sk_mem_reclaim(sk, SK_RECLAIM_CHUNK);
}
static inline void sock_release_ownership(struct sock *sk)
@@ -1889,10 +1919,8 @@ static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
if (skb_rx_queue_recorded(skb)) {
u16 rx_queue = skb_get_rx_queue(skb);
- if (WARN_ON_ONCE(rx_queue == NO_QUEUE_MAPPING))
- return;
-
- sk->sk_rx_queue_mapping = rx_queue;
+ if (unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue))
+ WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue);
}
#endif
}
@@ -1900,15 +1928,19 @@ static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
static inline void sk_rx_queue_clear(struct sock *sk)
{
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
- sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING;
+ WRITE_ONCE(sk->sk_rx_queue_mapping, NO_QUEUE_MAPPING);
#endif
}
static inline int sk_rx_queue_get(const struct sock *sk)
{
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
- if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING)
- return sk->sk_rx_queue_mapping;
+ if (sk) {
+ int res = READ_ONCE(sk->sk_rx_queue_mapping);
+
+ if (res != NO_QUEUE_MAPPING)
+ return res;
+ }
#endif
return -1;
@@ -2388,13 +2420,11 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
return;
val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
+ val = max_t(u32, val, sk_unused_reserved_mem(sk));
WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
}
-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
- bool force_schedule);
-
/**
* sk_page_frag - return an appropriate page_frag
* @sk: socket
@@ -2608,7 +2638,6 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
&skb_shinfo(skb)->tskey);
}
-DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
/**
* sk_eat_skb - Release a skb if it is no longer needed
* @sk: socket to eat this skb from
@@ -2620,12 +2649,6 @@ DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
__skb_unlink(skb, &sk->sk_receive_queue);
- if (static_branch_unlikely(&tcp_rx_skb_cache_key) &&
- !sk->sk_rx_skb_cache) {
- sk->sk_rx_skb_cache = skb;
- skb_orphan(skb);
- return;
- }
__kfree_skb(skb);
}
@@ -2820,4 +2843,14 @@ void sock_set_sndtimeo(struct sock *sk, s64 secs);
int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len);
+int sock_get_timeout(long timeo, void *optval, bool old_timeval);
+int sock_copy_user_timeval(struct __kernel_sock_timeval *tv,
+ sockptr_t optval, int optlen, bool old_timeval);
+
+static inline bool sk_is_readable(struct sock *sk)
+{
+ if (sk->sk_prot->sock_is_readable)
+ return sk->sk_prot->sock_is_readable(sk);
+ return false;
+}
#endif /* _SOCK_H */
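A worked example of the reserved-memory accounting above (values illustrative):

	/*
	 * sk_reserved_mem = 1 MiB, sk_wmem_queued = 256 KiB,
	 * sk_rmem_alloc = 128 KiB
	 *	=> sk_unused_reserved_mem() = 1M - 256K - 128K = 640 KiB
	 *
	 * with sk_forward_alloc = 700 KiB:
	 *	=> reclaimable = 700K - 640K = 60 KiB
	 *
	 * sk_mem_reclaim() returns only those 60 KiB to the global memory
	 * accounting; the unused reservation stays with the socket until
	 * sk_mem_reclaim_final() zeroes sk_reserved_mem.
	 */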
diff --git a/include/net/strparser.h b/include/net/strparser.h
index 1d20b98493a1..732b7097d78e 100644
--- a/include/net/strparser.h
+++ b/include/net/strparser.h
@@ -54,10 +54,28 @@ struct strp_msg {
int offset;
};
+struct _strp_msg {
+ /* Internal cb structure. struct strp_msg must be first for passing
+ * to upper layer.
+ */
+ struct strp_msg strp;
+ int accum_len;
+};
+
+struct sk_skb_cb {
+#define SK_SKB_CB_PRIV_LEN 20
+ unsigned char data[SK_SKB_CB_PRIV_LEN];
+ struct _strp_msg strp;
+ /* temp_reg is a temporary register used for bpf_convert_data_end_access
+ * when dst_reg == src_reg.
+ */
+ u64 temp_reg;
+};
+
static inline struct strp_msg *strp_msg(struct sk_buff *skb)
{
return (struct strp_msg *)((void *)skb->cb +
- offsetof(struct qdisc_skb_cb, data));
+ offsetof(struct sk_skb_cb, strp));
}
/* Structure for an attached lower socket */
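The cb rework is transparent to strparser users: strp_msg() still returns the leading struct strp_msg, now located via sk_skb_cb. Usage is unchanged; a sketch with a hypothetical rcv callback:

static void my_strp_rcv(struct strparser *strp, struct sk_buff *skb)
{
	struct strp_msg *msg = strp_msg(skb);

	/* msg->offset and msg->full_len describe the record inside skb */
	pr_debug("record at %d, len %d\n", msg->offset, msg->full_len);
}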
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index 60d806b6a5ae..d353793dfeb5 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -299,28 +299,16 @@ void switchdev_port_fwd_mark_set(struct net_device *dev,
struct net_device *group_dev,
bool joining);
-int switchdev_handle_fdb_add_to_device(struct net_device *dev,
+int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
const struct switchdev_notifier_fdb_info *fdb_info,
bool (*check_cb)(const struct net_device *dev),
bool (*foreign_dev_check_cb)(const struct net_device *dev,
const struct net_device *foreign_dev),
- int (*add_cb)(struct net_device *dev,
- const struct net_device *orig_dev, const void *ctx,
+ int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
const struct switchdev_notifier_fdb_info *fdb_info),
- int (*lag_add_cb)(struct net_device *dev,
- const struct net_device *orig_dev, const void *ctx,
- const struct switchdev_notifier_fdb_info *fdb_info));
-
-int switchdev_handle_fdb_del_to_device(struct net_device *dev,
- const struct switchdev_notifier_fdb_info *fdb_info,
- bool (*check_cb)(const struct net_device *dev),
- bool (*foreign_dev_check_cb)(const struct net_device *dev,
- const struct net_device *foreign_dev),
- int (*del_cb)(struct net_device *dev,
- const struct net_device *orig_dev, const void *ctx,
- const struct switchdev_notifier_fdb_info *fdb_info),
- int (*lag_del_cb)(struct net_device *dev,
- const struct net_device *orig_dev, const void *ctx,
+ int (*lag_mod_cb)(struct net_device *dev, struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
const struct switchdev_notifier_fdb_info *fdb_info));
int switchdev_handle_port_obj_add(struct net_device *dev,
@@ -426,32 +414,16 @@ call_switchdev_blocking_notifiers(unsigned long val,
}
static inline int
-switchdev_handle_fdb_add_to_device(struct net_device *dev,
- const struct switchdev_notifier_fdb_info *fdb_info,
- bool (*check_cb)(const struct net_device *dev),
- bool (*foreign_dev_check_cb)(const struct net_device *dev,
- const struct net_device *foreign_dev),
- int (*add_cb)(struct net_device *dev,
- const struct net_device *orig_dev, const void *ctx,
- const struct switchdev_notifier_fdb_info *fdb_info),
- int (*lag_add_cb)(struct net_device *dev,
- const struct net_device *orig_dev, const void *ctx,
- const struct switchdev_notifier_fdb_info *fdb_info))
-{
- return 0;
-}
-
-static inline int
-switchdev_handle_fdb_del_to_device(struct net_device *dev,
+switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
const struct switchdev_notifier_fdb_info *fdb_info,
bool (*check_cb)(const struct net_device *dev),
bool (*foreign_dev_check_cb)(const struct net_device *dev,
const struct net_device *foreign_dev),
- int (*del_cb)(struct net_device *dev,
- const struct net_device *orig_dev, const void *ctx,
+ int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
const struct switchdev_notifier_fdb_info *fdb_info),
- int (*lag_del_cb)(struct net_device *dev,
- const struct net_device *orig_dev, const void *ctx,
+ int (*lag_mod_cb)(struct net_device *dev, struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
const struct switchdev_notifier_fdb_info *fdb_info))
{
return 0;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 3166dc15d7d6..4da22b41bde6 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -48,7 +48,9 @@
extern struct inet_hashinfo tcp_hashinfo;
-extern struct percpu_counter tcp_orphan_count;
+DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
+int tcp_orphan_count_sum(void);
+
void tcp_time_wait(struct sock *sk, int state, int timeo);
#define MAX_TCP_HEADER L1_CACHE_ALIGN(128 + MAX_HEADER)
@@ -288,21 +290,18 @@ static inline bool tcp_out_of_memory(struct sock *sk)
return false;
}
-void sk_forced_mem_schedule(struct sock *sk, int size);
-
-static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
+static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
- struct percpu_counter *ocp = sk->sk_prot->orphan_count;
- int orphans = percpu_counter_read_positive(ocp);
-
- if (orphans << shift > sysctl_tcp_max_orphans) {
- orphans = percpu_counter_sum_positive(ocp);
- if (orphans << shift > sysctl_tcp_max_orphans)
- return true;
- }
- return false;
+ sk_wmem_queued_add(sk, -skb->truesize);
+ if (!skb_zcopy_pure(skb))
+ sk_mem_uncharge(sk, skb->truesize);
+ else
+ sk_mem_uncharge(sk, SKB_TRUESIZE(skb_end_offset(skb)));
+ __kfree_skb(skb);
}
+void sk_forced_mem_schedule(struct sock *sk, int size);
+
bool tcp_check_oom(struct sock *sk, int shift);
@@ -322,7 +321,7 @@ void tcp_shutdown(struct sock *sk, int how);
int tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);
-void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb);
+void tcp_remove_empty_skb(struct sock *sk);
int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
@@ -330,8 +329,6 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
int flags);
int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
size_t size, int flags);
-struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
- struct page *page, int offset, size_t *size);
ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
size_t size, int flags);
int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
@@ -350,6 +347,8 @@ void tcp_twsk_destructor(struct sock *sk);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags);
+struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
+ bool force_schedule);
void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
static inline void tcp_dec_quickack_mode(struct sock *sk,
@@ -581,6 +580,8 @@ __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */
+void tcp_skb_entail(struct sock *sk, struct sk_buff *skb);
+void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
int nonagle);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
@@ -874,10 +875,11 @@ struct tcp_skb_cb {
__u32 ack_seq; /* Sequence number ACK'd */
union {
struct {
+#define TCPCB_DELIVERED_CE_MASK ((1U<<20) - 1)
/* There is space for up to 24 bytes */
- __u32 in_flight:30,/* Bytes in flight at transmit */
- is_app_limited:1, /* cwnd not fully used? */
- unused:1;
+ __u32 is_app_limited:1, /* cwnd not fully used? */
+ delivered_ce:20,
+ unused:11;
/* pkts S/ACKed so far upon tx of skb, incl retrans: */
__u32 delivered;
/* start of send pipeline phase */
@@ -975,7 +977,8 @@ static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
const struct sk_buff *from)
{
return likely(tcp_skb_can_collapse_to(to) &&
- mptcp_skb_can_collapse(to, from));
+ mptcp_skb_can_collapse(to, from) &&
+ skb_pure_zcopy_same(to, from));
}
/* Events passed to congestion control interface */
@@ -1029,7 +1032,9 @@ struct ack_sample {
struct rate_sample {
u64 prior_mstamp; /* starting timestamp for interval */
u32 prior_delivered; /* tp->delivered at "prior_mstamp" */
+ u32 prior_delivered_ce;/* tp->delivered_ce at "prior_mstamp" */
s32 delivered; /* number of packets delivered over interval */
+ s32 delivered_ce; /* number of packets delivered w/ CE marks*/
long interval_us; /* time for tp->delivered to incr "delivered" */
u32 snd_interval_us; /* snd interval for delivered packets */
u32 rcv_interval_us; /* rcv interval for delivered packets */
@@ -1418,6 +1423,17 @@ static inline int tcp_full_space(const struct sock *sk)
return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}
+static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
+{
+ int unused_mem = sk_unused_reserved_mem(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
+ if (unused_mem)
+ tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
+ tcp_win_from_space(sk, unused_mem));
+}
+
void tcp_cleanup_rbuf(struct sock *sk, int copied);
/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
@@ -1576,6 +1592,7 @@ struct tcp_md5sig_key {
u8 keylen;
u8 family; /* AF_INET or AF_INET6 */
u8 prefixlen;
+ u8 flags;
union tcp_md5_addr addr;
int l3index; /* set if key added with L3 scope */
u8 key[TCP_MD5SIG_MAXKEYLEN];
@@ -1621,10 +1638,10 @@ struct tcp_md5sig_pool {
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
const struct sock *sk, const struct sk_buff *skb);
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
- int family, u8 prefixlen, int l3index,
+ int family, u8 prefixlen, int l3index, u8 flags,
const u8 *newkey, u8 newkeylen, gfp_t gfp);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
- int family, u8 prefixlen, int l3index);
+ int family, u8 prefixlen, int l3index, u8 flags);
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
const struct sock *addr_sk);
@@ -1869,7 +1886,7 @@ static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct soc
{
list_del(&skb->tcp_tsorted_anchor);
tcp_rtx_queue_unlink(skb, sk);
- sk_wmem_free_skb(sk, skb);
+ tcp_wmem_free_skb(sk, skb);
}
static inline void tcp_push_pending_frames(struct sock *sk)
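tcp_orphan_count becomes a plain per-CPU counter, so summing is a straightforward CPU walk. A plausible out-of-line definition, clamped at zero because individual CPUs can hold transient negative deltas:

int tcp_orphan_count_sum(void)
{
	int i, total = 0;

	for_each_possible_cpu(i)
		total += per_cpu(tcp_orphan_count, i);

	return max(total, 0);
}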
diff --git a/include/net/tls.h b/include/net/tls.h
index be4b3e1cac46..526cb2c3b724 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -66,7 +66,7 @@
#define MAX_IV_SIZE 16
#define TLS_MAX_REC_SEQ_SIZE 8
-/* For AES-CCM, the full 16-bytes of IV is made of '4' fields of given sizes.
+/* For CCM mode, the full 16 bytes of IV are made of '4' fields of given sizes.
*
* IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
*
@@ -74,6 +74,7 @@
* Hence b0 contains (3 - 1) = 2.
*/
#define TLS_AES_CCM_IV_B0_BYTE 2
+#define TLS_SM4_CCM_IV_B0_BYTE 2
#define __TLS_INC_STATS(net, field) \
__SNMP_INC_STATS((net)->mib.tls_statistics, field)
@@ -220,6 +221,8 @@ union tls_crypto_context {
struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305;
+ struct tls12_crypto_info_sm4_gcm sm4_gcm;
+ struct tls12_crypto_info_sm4_ccm sm4_ccm;
};
};
@@ -358,6 +361,7 @@ int tls_sk_query(struct sock *sk, int optname, char __user *optval,
int __user *optlen);
int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
unsigned int optlen);
+void tls_err_abort(struct sock *sk, int err);
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
@@ -375,7 +379,7 @@ void tls_sw_release_resources_rx(struct sock *sk);
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int nonblock, int flags, int *addr_len);
-bool tls_sw_stream_read(const struct sock *sk);
+bool tls_sw_sock_is_readable(struct sock *sk);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len, unsigned int flags);
@@ -466,12 +470,6 @@ static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
#endif
}
-static inline void tls_err_abort(struct sock *sk, int err)
-{
- sk->sk_err = err;
- sk_error_report(sk);
-}
-
static inline bool tls_bigint_increment(unsigned char *seq, int len)
{
int i;
@@ -512,7 +510,7 @@ static inline void tls_advance_record_sn(struct sock *sk,
struct cipher_context *ctx)
{
if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
- tls_err_abort(sk, EBADMSG);
+ tls_err_abort(sk, -EBADMSG);
if (prot->version != TLS_1_3_VERSION &&
prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
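tls_err_abort() moves out of line, and the caller fix above shows the new convention: pass a negative errno. A plausible definition consistent with that:

void tls_err_abort(struct sock *sk, int err)
{
	WARN_ON_ONCE(err >= 0);
	sk->sk_err = -err;	/* sk_err holds a positive errno */
	sk_error_report(sk);
}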
diff --git a/include/net/udp.h b/include/net/udp.h
index 360df454356c..909ecf447e0f 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -494,8 +494,9 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
* CHECKSUM_NONE in __udp_gso_segment. UDP GRO indeed builds partial
* packets in udp_gro_complete_segment. As does UDP GSO, verified by
* udp_send_skb. But when those packets are looped in dev_loopback_xmit
- * their ip_summed is set to CHECKSUM_UNNECESSARY. Reset in this
- * specific case, where PARTIAL is both correct and required.
+ * their ip_summed CHECKSUM_NONE is changed to CHECKSUM_UNNECESSARY.
+ * Reset in this specific case, where PARTIAL is both correct and
+ * required.
*/
if (skb->pkt_type == PACKET_LOOPBACK)
skb->ip_summed = CHECKSUM_PARTIAL;
diff --git a/include/net/xdp.h b/include/net/xdp.h
index ad5b02dcb6f4..447f9b1578f3 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -15,13 +15,13 @@
* level RX-ring queues. It is information that is specific to how
* the driver have configured a given RX-ring queue.
*
- * Each xdp_buff frame received in the driver carry a (pointer)
+ * Each xdp_buff frame received in the driver carries a (pointer)
* reference to this xdp_rxq_info structure. This provides the XDP
* data-path read-access to RX-info for both kernel and bpf-side
* (limited subset).
*
* For now, direct access is only safe while running in NAPI/softirq
- * context. Contents is read-mostly and must not be updated during
+ * context. Contents are read-mostly and must not be updated during
* driver NAPI/softirq poll.
*
* The driver usage API is a register and unregister API.
@@ -30,8 +30,8 @@
* can be attached as long as it doesn't change the underlying
* RX-ring. If the RX-ring does change significantly, the NIC driver
* naturally need to stop the RX-ring before purging and reallocating
- * memory. In that process the driver MUST call unregistor (which
- * also apply for driver shutdown and unload). The register API is
+ * memory. In that process the driver MUST call unregister (which
+ * also applies for driver shutdown and unload). The register API is
* also mandatory during RX-ring setup.
*/
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
index 4e295541e396..443d45951564 100644
--- a/include/net/xdp_sock_drv.h
+++ b/include/net/xdp_sock_drv.h
@@ -77,6 +77,12 @@ static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
return xp_alloc(pool);
}
+/* Returns as many entries as possible up to max. 0 <= N <= max. */
+static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
+{
+ return xp_alloc_batch(pool, xdp, max);
+}
+
static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
return xp_can_alloc(pool, count);
@@ -89,6 +95,13 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
xp_free(xskb);
}
+static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
+{
+ xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
+ xdp->data_meta = xdp->data;
+ xdp->data_end = xdp->data + size;
+}
+
static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
u64 addr)
{
@@ -212,6 +225,11 @@ static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
return NULL;
}
+static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
+{
+ return 0;
+}
+
static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
return false;
@@ -221,6 +239,10 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}
+static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
+{
+}
+
static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
u64 addr)
{
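The batch allocator lets zero-copy drivers refill an RX ring in one call, with xsk_buff_set_size() fixing up each descriptor's data pointers afterwards. A hedged refill sketch; my_ring and rx_buf_len are hypothetical:

static u32 my_rx_refill(struct my_ring *ring, struct xsk_buff_pool *pool,
			u32 budget)
{
	struct xdp_buff *bufs[64];
	u32 i, n;

	n = xsk_buff_alloc_batch(pool, bufs, min(budget, 64U));
	for (i = 0; i < n; i++) {
		xsk_buff_set_size(bufs[i], ring->rx_buf_len);
		/* post xsk_buff_xdp_get_dma(bufs[i]) to the HW ring */
	}
	return n;	/* 0 <= n <= budget */
}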
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index 7a9a23e7a604..ddeefc4a1040 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -7,6 +7,7 @@
#include <linux/if_xdp.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
+#include <linux/bpf.h>
#include <net/xdp.h>
struct xsk_buff_pool;
@@ -23,7 +24,6 @@ struct xdp_buff_xsk {
dma_addr_t dma;
dma_addr_t frame_dma;
struct xsk_buff_pool *pool;
- bool unaligned;
u64 orig_addr;
struct list_head free_list_node;
};
@@ -67,6 +67,7 @@ struct xsk_buff_pool {
u32 free_heads_cnt;
u32 headroom;
u32 chunk_size;
+ u32 chunk_shift;
u32 frame_len;
u8 cached_need_wakeup;
bool uses_need_wakeup;
@@ -81,6 +82,13 @@ struct xsk_buff_pool {
struct xdp_buff_xsk *free_heads[];
};
+/* Masks for xdp_umem_page flags.
+ * The low 12 bits of the addr will be 0 since this is the page address, so we
+ * can use them for flags.
+ */
+#define XSK_NEXT_PG_CONTIG_SHIFT 0
+#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)
+
/* AF_XDP core. */
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
struct xdp_umem *umem);
@@ -89,7 +97,6 @@ int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
struct net_device *dev, u16 queue_id);
void xp_destroy(struct xsk_buff_pool *pool);
-void xp_release(struct xdp_buff_xsk *xskb);
void xp_get_pool(struct xsk_buff_pool *pool);
bool xp_put_pool(struct xsk_buff_pool *pool);
void xp_clear_dev(struct xsk_buff_pool *pool);
@@ -99,12 +106,28 @@ void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
/* AF_XDP, and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);
+static inline void xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
+ u64 addr)
+{
+ xskb->orig_addr = addr;
+ xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
+}
+
+static inline void xp_init_xskb_dma(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
+ dma_addr_t *dma_pages, u64 addr)
+{
+ xskb->frame_dma = (dma_pages[addr >> PAGE_SHIFT] & ~XSK_NEXT_PG_CONTIG_MASK) +
+ (addr & ~PAGE_MASK);
+ xskb->dma = xskb->frame_dma + pool->headroom + XDP_PACKET_HEADROOM;
+}
+
/* AF_XDP ZC drivers, via xdp_sock_buff.h */
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
unsigned long attrs, struct page **pages, u32 nr_pages);
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
+u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
@@ -180,4 +203,25 @@ static inline u64 xp_unaligned_add_offset_to_addr(u64 addr)
xp_unaligned_extract_offset(addr);
}
+static inline u32 xp_aligned_extract_idx(struct xsk_buff_pool *pool, u64 addr)
+{
+ return xp_aligned_extract_addr(pool, addr) >> pool->chunk_shift;
+}
+
+static inline void xp_release(struct xdp_buff_xsk *xskb)
+{
+ if (xskb->pool->unaligned)
+ xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
+}
+
+static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb)
+{
+ u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;
+
+ offset += xskb->pool->headroom;
+ if (!xskb->pool->unaligned)
+ return xskb->orig_addr + offset;
+ return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
+}
+
#endif /* XSK_BUFF_POOL_H_ */
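A worked example for xp_get_handle() (illustrative numbers):

	/*
	 * orig_addr = 0x10000, pool->headroom = 256,
	 * data - data_hard_start = 64  =>  offset = 64 + 256 = 320
	 *
	 * aligned:   handle = 0x10000 + 320 = 0x10140
	 * unaligned: handle = 0x10000 + (320 << XSK_UNALIGNED_BUF_OFFSET_SHIFT),
	 *            keeping the base address in the low bits.
	 */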
diff --git a/include/rdma/ib_hdrs.h b/include/rdma/ib_hdrs.h
index 7e542205861c..8ae07c0ecdf7 100644
--- a/include/rdma/ib_hdrs.h
+++ b/include/rdma/ib_hdrs.h
@@ -232,6 +232,7 @@ static inline u32 ib_get_sqpn(struct ib_other_headers *ohdr)
#define IB_BTH_SE_SHIFT 23
#define IB_BTH_TVER_MASK 0xf
#define IB_BTH_TVER_SHIFT 16
+#define IB_BTH_OPCODE_CNP 0x81
static inline u8 ib_bth_get_pad(struct ib_other_headers *ohdr)
{
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 5ae9dff74dac..92a673cd9b4f 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -38,6 +38,7 @@ struct ib_umem_dmabuf {
unsigned long first_sg_offset;
unsigned long last_sg_trim;
void *private;
+ u8 pinned : 1;
};
static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
@@ -139,6 +140,10 @@ struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
unsigned long offset, size_t size,
int fd, int access,
const struct dma_buf_attach_ops *ops);
+struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
+ unsigned long offset,
+ size_t size, int fd,
+ int access);
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
@@ -179,6 +184,12 @@ struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
{
return ERR_PTR(-EOPNOTSUPP);
}
+static inline struct ib_umem_dmabuf *
+ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
+ size_t size, int fd, int access)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
return -EOPNOTSUPP;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 4b50d9a3018a..6e9ad656ecb7 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -545,6 +545,22 @@ enum ib_port_speed {
IB_SPEED_NDR = 128,
};
+enum ib_stat_flag {
+ IB_STAT_FLAG_OPTIONAL = 1 << 0,
+};
+
+/**
+ * struct rdma_stat_desc
+ * @name - The name of the counter
+ * @flags - Flags of the counter, e.g. IB_STAT_FLAG_OPTIONAL
+ * @priv - Driver private information; core code must not use it
+ */
+struct rdma_stat_desc {
+ const char *name;
+ unsigned int flags;
+ const void *priv;
+};
+
/**
* struct rdma_hw_stats
* @lock - Mutex to protect parallel write access to lifespan and values
@@ -555,8 +571,10 @@ enum ib_port_speed {
* should be before being updated again. Stored in jiffies, defaults
to 10 milliseconds, drivers can override the default by specifying
* their own value during their allocation routine.
- * @name - Array of pointers to static names used for the counters in
- * directory.
+ * @descs - Array of pointers to static descriptors used for the counters
+ * in directory.
+ * @is_disabled - A bitmap indicating whether each counter is
+ * currently disabled.
* @num_counters - How many hardware counters there are. If descs is
* shorter than this number, a kernel oops will result. Driver authors
* are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@descs) < num_counters)
@@ -568,36 +586,19 @@ struct rdma_hw_stats {
struct mutex lock; /* Protect lifespan and values[] */
unsigned long timestamp;
unsigned long lifespan;
- const char * const *names;
+ const struct rdma_stat_desc *descs;
+ unsigned long *is_disabled;
int num_counters;
u64 value[];
};
#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
-/**
- * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
- * for drivers.
- * @names - Array of static const char *
- * @num_counters - How many elements in array
- * @lifespan - How many milliseconds between updates
- */
-static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
- const char * const *names, int num_counters,
- unsigned long lifespan)
-{
- struct rdma_hw_stats *stats;
- stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
- GFP_KERNEL);
- if (!stats)
- return NULL;
- stats->names = names;
- stats->num_counters = num_counters;
- stats->lifespan = msecs_to_jiffies(lifespan);
-
- return stats;
-}
+struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
+ const struct rdma_stat_desc *descs, int num_counters,
+ unsigned long lifespan);
+void rdma_free_hw_stats_struct(struct rdma_hw_stats *stats);
/* Define bits for the various functionality this port needs to be supported by
* the core.
@@ -2570,6 +2571,13 @@ struct ib_device_ops {
struct rdma_hw_stats *stats, u32 port, int index);
/**
+ * modify_hw_stat - Modify the counter configuration
+ * @enable: true/false when enable/disable a counter
+ * Return codes - 0 on success or error code otherwise.
+ */
+ int (*modify_hw_stat)(struct ib_device *device, u32 port,
+ unsigned int counter_index, bool enable);
+ /**
* Allows rdma drivers to add their own restrack attributes.
*/
int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
@@ -2906,6 +2914,15 @@ int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
size_t length, u32 min_pgoff,
u32 max_pgoff);
+static inline int
+rdma_user_mmap_entry_insert_exact(struct ib_ucontext *ucontext,
+ struct rdma_user_mmap_entry *entry,
+ size_t length, u32 pgoff)
+{
+ return rdma_user_mmap_entry_insert_range(ucontext, entry, length, pgoff,
+ pgoff);
+}
+
struct rdma_user_mmap_entry *
rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
unsigned long pgoff);
@@ -4097,8 +4114,13 @@ static inline int ib_dma_map_sgtable_attrs(struct ib_device *dev,
enum dma_data_direction direction,
unsigned long dma_attrs)
{
+ int nents;
+
if (ib_uses_virt_dma(dev)) {
- ib_dma_virt_map_sg(dev, sgt->sgl, sgt->orig_nents);
+ nents = ib_dma_virt_map_sg(dev, sgt->sgl, sgt->orig_nents);
+ if (!nents)
+ return -EIO;
+ sgt->nents = nents;
return 0;
}
return dma_map_sgtable(dev->dma_device, sgt, direction, dma_attrs);
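With the allocation helper moved out of line, a driver now describes its counters in a static rdma_stat_desc table and hands it to rdma_alloc_hw_stats_struct(). A hedged sketch with purely illustrative names (mydrv_*); the descriptor fields and IB_STAT_FLAG_OPTIONAL are the ones declared above:

/* Illustrative driver-side table; names are hypothetical. */
static const struct rdma_stat_desc mydrv_port_stats[] = {
	{ .name = "rx_pkts" },
	{ .name = "tx_pkts" },
	/* Optional counters can be toggled via the modify_hw_stat op. */
	{ .name = "cnp_sent", .flags = IB_STAT_FLAG_OPTIONAL },
};

static struct rdma_hw_stats *mydrv_alloc_hw_port_stats(struct ib_device *dev,
							u32 port_num)
{
	return rdma_alloc_hw_stats_struct(mydrv_port_stats,
					  ARRAY_SIZE(mydrv_port_stats),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}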
diff --git a/include/rdma/rdma_counter.h b/include/rdma/rdma_counter.h
index 0295b22cd1cd..45d5481a7846 100644
--- a/include/rdma/rdma_counter.h
+++ b/include/rdma/rdma_counter.h
@@ -63,4 +63,6 @@ int rdma_counter_get_mode(struct ib_device *dev, u32 port,
enum rdma_nl_counter_mode *mode,
enum rdma_nl_counter_mask *mask);
+int rdma_counter_modify(struct ib_device *dev, u32 port,
+ unsigned int index, bool enable);
#endif /* _RDMA_COUNTER_H_ */
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 6fe125a71b60..79e4903bd414 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -664,6 +664,7 @@ extern void sas_suspend_ha(struct sas_ha_struct *sas_ha);
int sas_set_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates);
int sas_phy_reset(struct sas_phy *phy, int hard_reset);
+int sas_phy_enable(struct sas_phy *phy, int enable);
extern int sas_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
extern int sas_target_alloc(struct scsi_target *);
extern int sas_slave_configure(struct scsi_device *);
diff --git a/include/scsi/sas.h b/include/scsi/sas.h
index 4726c1bbec65..64154c1fed02 100644
--- a/include/scsi/sas.h
+++ b/include/scsi/sas.h
@@ -323,8 +323,10 @@ struct ssp_response_iu {
__be32 sense_data_len;
__be32 response_data_len;
- u8 resp_data[0];
- u8 sense_data[];
+ union {
+ DECLARE_FLEX_ARRAY(u8, resp_data);
+ DECLARE_FLEX_ARRAY(u8, sense_data);
+ };
} __attribute__ ((packed));
struct ssp_command_iu {
@@ -554,8 +556,10 @@ struct ssp_response_iu {
__be32 sense_data_len;
__be32 response_data_len;
- u8 resp_data[0];
- u8 sense_data[];
+ union {
+ DECLARE_FLEX_ARRAY(u8, resp_data);
+ DECLARE_FLEX_ARRAY(u8, sense_data);
+ };
} __attribute__ ((packed));
struct ssp_command_iu {
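The conversion above is needed because ISO C forbids a flexible array member directly inside a union, and the old u8 resp_data[0] zero-length idiom is deprecated. DECLARE_FLEX_ARRAY() wraps the array in an anonymous struct so two views of the same trailing bytes can coexist. A standalone sketch with a simplified local re-definition of the macro (the kernel's version in <linux/stddef.h> is equivalent in spirit):

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-in for the kernel macro: a zero-size placeholder
 * member makes the flexible array legal inside a union. */
#define DECLARE_FLEX_ARRAY(TYPE, NAME) \
	struct { struct { } __empty_##NAME; TYPE NAME[]; }

struct response {
	unsigned int len;
	union {		/* both views alias the same trailing storage */
		DECLARE_FLEX_ARRAY(unsigned char, resp_data);
		DECLARE_FLEX_ARRAY(unsigned char, sense_data);
	};
};

int main(void)
{
	printf("resp=%zu sense=%zu\n",
	       offsetof(struct response, resp_data),
	       offsetof(struct response, sense_data));
	return 0;
}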
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index eaf04c9a1dfc..477a800a9543 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -10,7 +10,6 @@
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
#include <scsi/scsi_request.h>
struct Scsi_Host;
@@ -65,10 +64,16 @@ struct scsi_pointer {
#define SCMD_STATE_COMPLETE 0
#define SCMD_STATE_INFLIGHT 1
+enum scsi_cmnd_submitter {
+ SUBMITTED_BY_BLOCK_LAYER = 0,
+ SUBMITTED_BY_SCSI_ERROR_HANDLER = 1,
+ SUBMITTED_BY_SCSI_RESET_IOCTL = 2,
+} __packed;
+
struct scsi_cmnd {
struct scsi_request req;
struct scsi_device *device;
- struct list_head eh_entry; /* entry for the host eh_cmd_q */
+ struct list_head eh_entry; /* entry for the host eh_abort_list/eh_cmd_q */
struct delayed_work abort_work;
struct rcu_head rcu;
@@ -90,6 +95,7 @@ struct scsi_cmnd {
unsigned char prot_op;
unsigned char prot_type;
unsigned char prot_flags;
+ enum scsi_cmnd_submitter submitter;
unsigned short cmd_len;
enum dma_data_direction sc_data_direction;
@@ -117,10 +123,6 @@ struct scsi_cmnd {
* command (auto-sense). Length must be
* SCSI_SENSE_BUFFERSIZE bytes. */
- /* Low-level done function - can be used by low-level driver to point
- * to completion function. Not used by mid/upper level code. */
- void (*scsi_done) (struct scsi_cmnd *);
-
/*
* The following fields can be written to by the host specific code.
* Everything else should be left alone.
@@ -165,6 +167,8 @@ static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
return *(struct scsi_driver **)rq->rq_disk->private_data;
}
+void scsi_done(struct scsi_cmnd *cmd);
+
extern void scsi_finish_command(struct scsi_cmnd *cmd);
extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
@@ -396,4 +400,7 @@ static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd)
extern void scsi_build_sense(struct scsi_cmnd *scmd, int desc,
u8 key, u8 asc, u8 ascq);
+struct request *scsi_alloc_request(struct request_queue *q,
+ unsigned int op, blk_mq_req_flags_t flags);
+
#endif /* _SCSI_SCSI_CMND_H */
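The removal of the per-command ->scsi_done pointer pairs with the exported scsi_done() declared above: low-level drivers no longer stash and invoke a callback, they call the midlayer entry point directly. A minimal, hypothetical LLD fragment (mydrv_* is illustrative):

static void mydrv_complete_request(struct scsi_cmnd *cmd, int result)
{
	cmd->result = result;
	scsi_done(cmd);		/* replaces the old cmd->scsi_done(cmd) */
}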
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index b97e142a7ca9..d1c6fc83b1e3 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -5,7 +5,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <scsi/scsi.h>
#include <linux/atomic.h>
#include <linux/sbitmap.h>
@@ -207,6 +207,7 @@ struct scsi_device {
* creation time */
unsigned ignore_media_change:1; /* Ignore MEDIA CHANGE on resume */
+ unsigned int queue_stopped; /* request queue is quiesced */
bool offline_already; /* Device offline message logged */
atomic_t disk_events_disable_depth; /* disable depth for disk events */
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 75363707b73f..ebe059badba0 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -474,14 +474,9 @@ struct scsi_host_template {
#define SCSI_DEFAULT_HOST_BLOCKED 7
/*
- * Pointer to the sysfs class properties for this host, NULL terminated.
+ * Pointer to the SCSI host sysfs attribute groups, NULL terminated.
*/
- struct device_attribute **shost_attrs;
-
- /*
- * Pointer to the SCSI device properties for this host, NULL terminated.
- */
- struct device_attribute **sdev_attrs;
+ const struct attribute_group **shost_groups;
/*
* Pointer to the SCSI device attribute groups for this host,
@@ -516,7 +511,7 @@ struct scsi_host_template {
unsigned long irq_flags; \
int rc; \
spin_lock_irqsave(shost->host_lock, irq_flags); \
- rc = func_name##_lck (cmd, cmd->scsi_done); \
+ rc = func_name##_lck(cmd); \
spin_unlock_irqrestore(shost->host_lock, irq_flags); \
return rc; \
}
@@ -556,6 +551,7 @@ struct Scsi_Host {
struct mutex scan_mutex;/* serialize scanning activity */
+ struct list_head eh_abort_list;
struct list_head eh_cmd_q;
struct task_struct * ehandler; /* Error recovery thread. */
struct completion * eh_action; /* Wait for specific actions on the
@@ -695,6 +691,12 @@ struct Scsi_Host {
/* ldm bits */
struct device shost_gendev, shost_dev;
+ /*
+ * The array size 3 provides space for one attribute group defined by
+ * the SCSI core, one attribute group defined by the SCSI LLD and one
+ * terminating NULL pointer.
+ */
+ const struct attribute_group *shost_dev_attr_groups[3];
/*
* Points to the transport data (if any) which is allocated
@@ -798,16 +800,6 @@ void scsi_host_busy_iter(struct Scsi_Host *,
struct class_container;
/*
- * These two functions are used to allocate and free a pseudo device
- * which will connect to the host adapter itself rather than any
- * physical device. You must deallocate when you are done with the
- * thing. This physical pseudo-device isn't real and won't be available
- * from any high-level drivers.
- */
-extern void scsi_free_host_dev(struct scsi_device *);
-extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *);
-
-/*
* DIF defines the exchange of protection information between
* initiator and SBC block device.
*
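For the shost_attrs to shost_groups conversion, a driver wraps its attributes in an attribute_group array, typically via ATTRIBUTE_GROUPS(). A hedged sketch with hypothetical names (mydrv_*; dev_attr_fw_version is assumed to come from a DEVICE_ATTR_RO() elsewhere):

static struct attribute *mydrv_host_attrs[] = {
	&dev_attr_fw_version.attr,	/* assumed DEVICE_ATTR_RO(fw_version) */
	NULL
};
ATTRIBUTE_GROUPS(mydrv_host);		/* generates mydrv_host_groups[] */

static struct scsi_host_template mydrv_template = {
	.name		= "mydrv",
	.shost_groups	= mydrv_host_groups,	/* was .shost_attrs */
};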
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index 05ec927a3c72..0e75b9277c8c 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -41,6 +41,7 @@ enum sas_linkrate {
SAS_LINK_RATE_G2 = SAS_LINK_RATE_3_0_GBPS,
SAS_LINK_RATE_6_0_GBPS = 10,
SAS_LINK_RATE_12_0_GBPS = 11,
+ SAS_LINK_RATE_22_5_GBPS = 12,
/* These are virtual to the transport class and may never
* be signalled normally since the standard defined field
* is only 4 bits */
diff --git a/include/soc/arc/timers.h b/include/soc/arc/timers.h
index 7ecde3b159c8..ae99d3e855f1 100644
--- a/include/soc/arc/timers.h
+++ b/include/soc/arc/timers.h
@@ -17,8 +17,8 @@
#define ARC_REG_TIMER1_CNT 0x100 /* timer 1 count */
/* CTRL reg bits */
-#define TIMER_CTRL_IE (1 << 0) /* Interrupt when Count reaches limit */
-#define TIMER_CTRL_NH (1 << 1) /* Count only when CPU NOT halted */
+#define ARC_TIMER_CTRL_IE (1 << 0) /* Interrupt when Count reaches limit */
+#define ARC_TIMER_CTRL_NH (1 << 1) /* Count only when CPU NOT halted */
#define ARC_TIMERN_MAX 0xFFFFFFFF
diff --git a/include/soc/fsl/dpaa2-io.h b/include/soc/fsl/dpaa2-io.h
index c9d849924f89..4bf62de2e00e 100644
--- a/include/soc/fsl/dpaa2-io.h
+++ b/include/soc/fsl/dpaa2-io.h
@@ -44,6 +44,7 @@ struct device;
* @regs_cinh: The cache inhibited regs
* @dpio_id: The dpio index
* @qman_version: The qman version
+ * @qman_clk: The qman clock frequency in Hz
*
* Describes the attributes and features of the DPIO object.
*/
@@ -55,6 +56,7 @@ struct dpaa2_io_desc {
void __iomem *regs_cinh;
int dpio_id;
u32 qman_version;
+ u32 qman_clk;
};
struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
@@ -129,4 +131,11 @@ int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid,
u32 *fcnt, u32 *bcnt);
int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid,
u32 *num);
+
+int dpaa2_io_set_irq_coalescing(struct dpaa2_io *d, u32 irq_holdoff);
+void dpaa2_io_get_irq_coalescing(struct dpaa2_io *d, u32 *irq_holdoff);
+void dpaa2_io_set_adaptive_coalescing(struct dpaa2_io *d,
+ int use_adaptive_rx_coalesce);
+int dpaa2_io_get_adaptive_coalescing(struct dpaa2_io *d);
+void dpaa2_io_update_net_dim(struct dpaa2_io *d, __u64 frames, __u64 bytes);
#endif /* __FSL_DPAA2_IO_H */
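The new coalescing hooks slot naturally under a driver's ethtool handlers. A sketch using only the prototypes declared above, and assuming irq_holdoff is a holdoff period in microseconds (the header does not say; treat the unit as an assumption):

static int mydrv_apply_rx_coalesce(struct dpaa2_io *io, u32 rx_usecs,
				   int use_adaptive)
{
	/* Assumption: irq_holdoff is a microsecond holdoff period. */
	dpaa2_io_set_adaptive_coalescing(io, use_adaptive);
	return dpaa2_io_set_irq_coalescing(io, rx_usecs);
}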
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
index d7055b41982d..fef3a36b0210 100644
--- a/include/soc/mscc/ocelot.h
+++ b/include/soc/mscc/ocelot.h
@@ -563,9 +563,22 @@ struct ocelot_vcap_block {
int pol_lpr;
};
-struct ocelot_vlan {
- bool valid;
+struct ocelot_bridge_vlan {
u16 vid;
+ unsigned long portmask;
+ unsigned long untagged;
+ struct list_head list;
+};
+
+enum ocelot_port_tag_config {
+ /* all VLANs are egress-untagged */
+ OCELOT_PORT_TAG_DISABLED = 0,
+ /* all VLANs except the native VLAN and VID 0 are egress-tagged */
+ OCELOT_PORT_TAG_NATIVE = 1,
+ /* all VLANs except VID 0 are egress-tagged */
+ OCELOT_PORT_TAG_TRUNK_NO_VID0 = 2,
+ /* all VLANs are egress-tagged */
+ OCELOT_PORT_TAG_TRUNK = 3,
};
enum ocelot_sb {
@@ -590,9 +603,7 @@ struct ocelot_port {
bool vlan_aware;
/* VLAN that untagged frames are classified to, on ingress */
- struct ocelot_vlan pvid_vlan;
- /* The VLAN ID that will be transmitted as untagged, on egress */
- struct ocelot_vlan native_vlan;
+ const struct ocelot_bridge_vlan *pvid_vlan;
unsigned int ptp_skbs_in_flight;
u8 ptp_cmd;
@@ -635,8 +646,7 @@ struct ocelot {
u8 base_mac[ETH_ALEN];
- /* Keep track of the vlan port masks */
- u32 vlan_mask[VLAN_N_VID];
+ struct list_head vlans;
/* Switches like VSC9959 have flooding per traffic class */
int num_flooding_pgids;
@@ -665,6 +675,9 @@ struct ocelot {
struct delayed_work stats_work;
struct workqueue_struct *stats_queue;
+ /* Lock for serializing access to the MAC table */
+ struct mutex mact_lock;
+
struct workqueue_struct *owq;
u8 ptp:1;
diff --git a/include/soc/mscc/ocelot_vcap.h b/include/soc/mscc/ocelot_vcap.h
index 4869ebbd438d..eeb1142aa1b1 100644
--- a/include/soc/mscc/ocelot_vcap.h
+++ b/include/soc/mscc/ocelot_vcap.h
@@ -576,6 +576,16 @@ enum ocelot_mask_mode {
OCELOT_MASK_MODE_REDIRECT,
};
+enum ocelot_es0_vid_sel {
+ OCELOT_ES0_VID_PLUS_CLASSIFIED_VID = 0,
+ OCELOT_ES0_VID = 1,
+};
+
+enum ocelot_es0_pcp_sel {
+ OCELOT_CLASSIFIED_PCP = 0,
+ OCELOT_ES0_PCP = 1,
+};
+
enum ocelot_es0_tag {
OCELOT_NO_ES0_TAG,
OCELOT_ES0_TAG,
diff --git a/include/soc/qcom/spm.h b/include/soc/qcom/spm.h
new file mode 100644
index 000000000000..4951f9d8b0bd
--- /dev/null
+++ b/include/soc/qcom/spm.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2015, Linaro Ltd.
+ */
+
+#ifndef __SPM_H__
+#define __SPM_H__
+
+#include <linux/cpuidle.h>
+
+#define MAX_PMIC_DATA 2
+#define MAX_SEQ_DATA 64
+
+enum pm_sleep_mode {
+ PM_SLEEP_MODE_STBY,
+ PM_SLEEP_MODE_RET,
+ PM_SLEEP_MODE_SPC,
+ PM_SLEEP_MODE_PC,
+ PM_SLEEP_MODE_NR,
+};
+
+struct spm_reg_data {
+ const u16 *reg_offset;
+ u32 spm_cfg;
+ u32 spm_dly;
+ u32 pmic_dly;
+ u32 pmic_data[MAX_PMIC_DATA];
+ u32 avs_ctl;
+ u32 avs_limit;
+ u8 seq[MAX_SEQ_DATA];
+ u8 start_index[PM_SLEEP_MODE_NR];
+};
+
+struct spm_driver_data {
+ void __iomem *reg_base;
+ const struct spm_reg_data *reg_data;
+};
+
+void spm_set_low_power_mode(struct spm_driver_data *drv,
+ enum pm_sleep_mode mode);
+
+#endif /* __SPM_H__ */
diff --git a/include/soc/tegra/fuse.h b/include/soc/tegra/fuse.h
index 990701f788bc..67d2bc856fbc 100644
--- a/include/soc/tegra/fuse.h
+++ b/include/soc/tegra/fuse.h
@@ -6,6 +6,8 @@
#ifndef __SOC_TEGRA_FUSE_H__
#define __SOC_TEGRA_FUSE_H__
+#include <linux/types.h>
+
#define TEGRA20 0x20
#define TEGRA30 0x30
#define TEGRA114 0x35
@@ -22,11 +24,6 @@
#ifndef __ASSEMBLY__
-u32 tegra_read_chipid(void);
-u8 tegra_get_chip_id(void);
-u8 tegra_get_platform(void);
-bool tegra_is_silicon(void);
-
enum tegra_revision {
TEGRA_REVISION_UNKNOWN = 0,
TEGRA_REVISION_A01,
@@ -57,6 +54,10 @@ extern struct tegra_sku_info tegra_sku_info;
u32 tegra_read_straps(void);
u32 tegra_read_ram_code(void);
int tegra_fuse_readl(unsigned long offset, u32 *value);
+u32 tegra_read_chipid(void);
+u8 tegra_get_chip_id(void);
+u8 tegra_get_platform(void);
+bool tegra_is_silicon(void);
#else
static struct tegra_sku_info tegra_sku_info __maybe_unused;
@@ -74,6 +75,26 @@ static inline int tegra_fuse_readl(unsigned long offset, u32 *value)
{
return -ENODEV;
}
+
+static inline u32 tegra_read_chipid(void)
+{
+ return 0;
+}
+
+static inline u8 tegra_get_chip_id(void)
+{
+ return 0;
+}
+
+static inline u8 tegra_get_platform(void)
+{
+ return 0;
+}
+
+static inline bool tegra_is_silicon(void)
+{
+ return false;
+}
#endif
struct device *tegra_soc_device_register(void);
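The Tegra fuse change follows the usual compiled-out stub pattern: real prototypes in the configured branch, static inline no-ops otherwise, so call sites never need their own #ifdefs. The generic shape, as a kernel-style sketch with a hypothetical config symbol:

#ifdef CONFIG_MYFEATURE
u32 myfeature_read_id(void);
#else
static inline u32 myfeature_read_id(void)
{
	return 0;	/* benign default when the feature is compiled out */
}
#endif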
diff --git a/include/soc/tegra/irq.h b/include/soc/tegra/irq.h
index 8eb11a7109e4..94539551c8c1 100644
--- a/include/soc/tegra/irq.h
+++ b/include/soc/tegra/irq.h
@@ -6,8 +6,15 @@
#ifndef __SOC_TEGRA_IRQ_H
#define __SOC_TEGRA_IRQ_H
-#if defined(CONFIG_ARM)
+#include <linux/types.h>
+
+#if defined(CONFIG_ARM) && defined(CONFIG_ARCH_TEGRA)
bool tegra_pending_sgi(void);
+#else
+static inline bool tegra_pending_sgi(void)
+{
+ return false;
+}
#endif
#endif /* __SOC_TEGRA_IRQ_H */
diff --git a/include/soc/tegra/pm.h b/include/soc/tegra/pm.h
index 433878927026..ce4d0b1bd0d6 100644
--- a/include/soc/tegra/pm.h
+++ b/include/soc/tegra/pm.h
@@ -17,7 +17,7 @@ enum tegra_suspend_mode {
TEGRA_SUSPEND_NOT_READY,
};
-#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM)
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM) && defined(CONFIG_ARCH_TEGRA)
enum tegra_suspend_mode
tegra_pm_validate_suspend_mode(enum tegra_suspend_mode mode);
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index 0e45963bb767..82d9daa17851 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -8,7 +8,7 @@
#ifndef __SOUND_HDA_CODEC_H
#define __SOUND_HDA_CODEC_H
-#include <linux/kref.h>
+#include <linux/refcount.h>
#include <linux/mod_devicetable.h>
#include <sound/info.h>
#include <sound/control.h>
@@ -166,8 +166,8 @@ struct hda_pcm {
bool own_chmap; /* codec driver provides own channel maps */
/* private: */
struct hda_codec *codec;
- struct kref kref;
struct list_head list;
+ unsigned int disconnected:1;
};
/* codec information */
@@ -187,6 +187,8 @@ struct hda_codec {
/* PCM to create, set by patch_ops.build_pcms callback */
struct list_head pcm_list_head;
+ refcount_t pcm_ref;
+ wait_queue_head_t remove_sleep;
/* codec specific info */
void *spec;
@@ -420,7 +422,7 @@ void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec);
static inline void snd_hda_codec_pcm_get(struct hda_pcm *pcm)
{
- kref_get(&pcm->kref);
+ refcount_inc(&pcm->codec->pcm_ref);
}
void snd_hda_codec_pcm_put(struct hda_pcm *pcm);
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 22af68b01426..6a90ce405e60 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -558,6 +558,7 @@ int snd_hdac_stream_set_params(struct hdac_stream *azx_dev,
void snd_hdac_stream_start(struct hdac_stream *azx_dev, bool fresh_start);
void snd_hdac_stream_clear(struct hdac_stream *azx_dev);
void snd_hdac_stream_stop(struct hdac_stream *azx_dev);
+void snd_hdac_stop_streams_and_chip(struct hdac_bus *bus);
void snd_hdac_stream_reset(struct hdac_stream *azx_dev);
void snd_hdac_stream_sync_trigger(struct hdac_stream *azx_dev, bool set,
unsigned int streams, unsigned int reg);
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index d4e31ea16aba..77123c3e4095 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -78,36 +78,35 @@ struct hdac_ext_stream {
container_of(s, struct hdac_ext_stream, hstream)
void snd_hdac_ext_stream_init(struct hdac_bus *bus,
- struct hdac_ext_stream *stream, int idx,
- int direction, int tag);
+ struct hdac_ext_stream *hext_stream, int idx,
+ int direction, int tag);
int snd_hdac_ext_stream_init_all(struct hdac_bus *bus, int start_idx,
- int num_stream, int dir);
+ int num_stream, int dir);
void snd_hdac_stream_free_all(struct hdac_bus *bus);
void snd_hdac_link_free_all(struct hdac_bus *bus);
struct hdac_ext_stream *snd_hdac_ext_stream_assign(struct hdac_bus *bus,
struct snd_pcm_substream *substream,
int type);
-void snd_hdac_ext_stream_release(struct hdac_ext_stream *azx_dev, int type);
+void snd_hdac_ext_stream_release(struct hdac_ext_stream *hext_stream, int type);
void snd_hdac_ext_stream_decouple_locked(struct hdac_bus *bus,
- struct hdac_ext_stream *azx_dev, bool decouple);
+ struct hdac_ext_stream *hext_stream, bool decouple);
void snd_hdac_ext_stream_decouple(struct hdac_bus *bus,
struct hdac_ext_stream *azx_dev, bool decouple);
-void snd_hdac_ext_stop_streams(struct hdac_bus *bus);
int snd_hdac_ext_stream_set_spib(struct hdac_bus *bus,
- struct hdac_ext_stream *stream, u32 value);
+ struct hdac_ext_stream *hext_stream, u32 value);
int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_bus *bus,
- struct hdac_ext_stream *stream);
+ struct hdac_ext_stream *hext_stream);
void snd_hdac_ext_stream_drsm_enable(struct hdac_bus *bus,
bool enable, int index);
int snd_hdac_ext_stream_set_dpibr(struct hdac_bus *bus,
- struct hdac_ext_stream *stream, u32 value);
-int snd_hdac_ext_stream_set_lpib(struct hdac_ext_stream *stream, u32 value);
+ struct hdac_ext_stream *hext_stream, u32 value);
+int snd_hdac_ext_stream_set_lpib(struct hdac_ext_stream *hext_stream, u32 value);
-void snd_hdac_ext_link_stream_start(struct hdac_ext_stream *hstream);
-void snd_hdac_ext_link_stream_clear(struct hdac_ext_stream *hstream);
-void snd_hdac_ext_link_stream_reset(struct hdac_ext_stream *hstream);
-int snd_hdac_ext_link_stream_setup(struct hdac_ext_stream *stream, int fmt);
+void snd_hdac_ext_link_stream_start(struct hdac_ext_stream *hext_stream);
+void snd_hdac_ext_link_stream_clear(struct hdac_ext_stream *hext_stream);
+void snd_hdac_ext_link_stream_reset(struct hdac_ext_stream *hext_stream);
+int snd_hdac_ext_link_stream_setup(struct hdac_ext_stream *hext_stream, int fmt);
struct hdac_ext_link {
struct hdac_bus *bus;
diff --git a/include/sound/intel-nhlt.h b/include/sound/intel-nhlt.h
index d0574805865f..089a760d36eb 100644
--- a/include/sound/intel-nhlt.h
+++ b/include/sound/intel-nhlt.h
@@ -10,6 +10,14 @@
#include <linux/acpi.h>
+enum nhlt_link_type {
+ NHLT_LINK_HDA = 0,
+ NHLT_LINK_DSP = 1,
+ NHLT_LINK_DMIC = 2,
+ NHLT_LINK_SSP = 3,
+ NHLT_LINK_INVALID
+};
+
#if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_SND_INTEL_NHLT)
struct wav_fmt {
@@ -33,14 +41,6 @@ struct wav_fmt_ext {
u8 sub_fmt[16];
} __packed;
-enum nhlt_link_type {
- NHLT_LINK_HDA = 0,
- NHLT_LINK_DSP = 1,
- NHLT_LINK_DMIC = 2,
- NHLT_LINK_SSP = 3,
- NHLT_LINK_INVALID
-};
-
enum nhlt_device_type {
NHLT_DEVICE_BT = 0,
NHLT_DEVICE_DMIC = 1,
@@ -132,6 +132,12 @@ void intel_nhlt_free(struct nhlt_acpi_table *addr);
int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt);
+bool intel_nhlt_has_endpoint_type(struct nhlt_acpi_table *nhlt, u8 link_type);
+struct nhlt_specific_cfg *
+intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt,
+ u32 bus_id, u8 link_type, u8 vbps, u8 bps,
+ u8 num_ch, u32 rate, u8 dir, u8 dev_type);
+
#else
struct nhlt_acpi_table;
@@ -150,6 +156,21 @@ static inline int intel_nhlt_get_dmic_geo(struct device *dev,
{
return 0;
}
+
+static inline bool intel_nhlt_has_endpoint_type(struct nhlt_acpi_table *nhlt,
+ u8 link_type)
+{
+ return false;
+}
+
+static inline struct nhlt_specific_cfg *
+intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt,
+ u32 bus_id, u8 link_type, u8 vbps, u8 bps,
+ u8 num_ch, u32 rate, u8 dir, u8 dev_type)
+{
+ return NULL;
+}
+
#endif
#endif
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 1051b84e8579..653dfffb3ac8 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -36,13 +36,6 @@ struct snd_dma_device {
#define SNDRV_DMA_TYPE_CONTINUOUS 1 /* continuous no-DMA memory */
#define SNDRV_DMA_TYPE_DEV 2 /* generic device continuous */
#define SNDRV_DMA_TYPE_DEV_WC 5 /* continuous write-combined */
-#ifdef CONFIG_SND_DMA_SGBUF
-#define SNDRV_DMA_TYPE_DEV_SG 3 /* generic device SG-buffer */
-#define SNDRV_DMA_TYPE_DEV_WC_SG 6 /* SG write-combined */
-#else
-#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */
-#define SNDRV_DMA_TYPE_DEV_WC_SG SNDRV_DMA_TYPE_DEV_WC
-#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
#define SNDRV_DMA_TYPE_DEV_IRAM 4 /* generic device iram-buffer */
#else
@@ -51,6 +44,13 @@ struct snd_dma_device {
#define SNDRV_DMA_TYPE_VMALLOC 7 /* vmalloc'ed buffer */
#define SNDRV_DMA_TYPE_NONCONTIG 8 /* non-coherent SG buffer */
#define SNDRV_DMA_TYPE_NONCOHERENT 9 /* non-coherent buffer */
+#ifdef CONFIG_SND_DMA_SGBUF
+#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_NONCONTIG
+#define SNDRV_DMA_TYPE_DEV_WC_SG 6 /* SG write-combined */
+#else
+#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */
+#define SNDRV_DMA_TYPE_DEV_WC_SG SNDRV_DMA_TYPE_DEV_WC
+#endif
/*
* info for buffer allocation
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 33451f8ff755..9b187d86e1bd 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -147,6 +147,9 @@ struct snd_pcm_ops {
#define SNDRV_PCM_FMTBIT_S24_BE _SNDRV_PCM_FMTBIT(S24_BE)
#define SNDRV_PCM_FMTBIT_U24_LE _SNDRV_PCM_FMTBIT(U24_LE)
#define SNDRV_PCM_FMTBIT_U24_BE _SNDRV_PCM_FMTBIT(U24_BE)
+// For S32/U32 formats, the 'msbits' hardware parameter is often used to convey how many of the
+// most significant bits carry valid sample data. This covers so-called 'left-justified' or
+// 'right-padded' samples that are narrower than 32 bits.
#define SNDRV_PCM_FMTBIT_S32_LE _SNDRV_PCM_FMTBIT(S32_LE)
#define SNDRV_PCM_FMTBIT_S32_BE _SNDRV_PCM_FMTBIT(S32_BE)
#define SNDRV_PCM_FMTBIT_U32_LE _SNDRV_PCM_FMTBIT(U32_LE)
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index fb11c7693b25..c2b36f7d917d 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -749,7 +749,7 @@ struct se_lun {
/* ALUA target port group linkage */
struct list_head lun_tg_pt_gp_link;
- struct t10_alua_tg_pt_gp *lun_tg_pt_gp;
+ struct t10_alua_tg_pt_gp __rcu *lun_tg_pt_gp;
spinlock_t lun_tg_pt_gp_lock;
struct se_portal_group *lun_tpg;
@@ -812,8 +812,9 @@ struct se_device {
atomic_long_t read_bytes;
atomic_long_t write_bytes;
/* Active commands on this virtual SE device */
- atomic_t simple_cmds;
- atomic_t dev_ordered_sync;
+ atomic_t non_ordered;
+ bool ordered_sync_in_progress;
+ atomic_t delayed_cmd_count;
atomic_t dev_qf_count;
u32 export_count;
spinlock_t delayed_cmd_lock;
@@ -834,6 +835,7 @@ struct se_device {
struct list_head dev_sep_list;
struct list_head dev_tmr_list;
struct work_struct qf_work_queue;
+ struct work_struct delayed_cmd_work;
struct list_head delayed_cmd_list;
struct list_head qf_cmd_list;
/* Pointer to associated SE HBA */
@@ -900,6 +902,7 @@ struct se_portal_group {
* Negative values can be used by fabric drivers for internal use TPGs.
*/
int proto_id;
+ bool enabled;
/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
atomic_t tpg_pr_ref_count;
/* Spinlock for adding/removing ACLed Nodes */
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 3c5ade7a04a6..38f0662476d1 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -89,6 +89,7 @@ struct target_core_fabric_ops {
void (*add_wwn_groups)(struct se_wwn *);
struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
const char *);
+ int (*fabric_enable_tpg)(struct se_portal_group *se_tpg, bool enable);
void (*fabric_drop_tpg)(struct se_portal_group *);
int (*fabric_post_link)(struct se_portal_group *,
struct se_lun *);
diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h
index a23be89119aa..a8e97f84b652 100644
--- a/include/trace/bpf_probe.h
+++ b/include/trace/bpf_probe.h
@@ -93,8 +93,7 @@ __section("__bpf_raw_tp_map") = { \
#define FIRST(x, ...) x
-#undef DEFINE_EVENT_WRITABLE
-#define DEFINE_EVENT_WRITABLE(template, call, proto, args, size) \
+#define __CHECK_WRITABLE_BUF_SIZE(call, proto, args, size) \
static inline void bpf_test_buffer_##call(void) \
{ \
/* BUILD_BUG_ON() is ignored if the code is completely eliminated, but \
@@ -103,8 +102,12 @@ static inline void bpf_test_buffer_##call(void) \
*/ \
FIRST(proto); \
(void)BUILD_BUG_ON_ZERO(size != sizeof(*FIRST(args))); \
-} \
-__DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), size)
+}
+
+#undef DEFINE_EVENT_WRITABLE
+#define DEFINE_EVENT_WRITABLE(template, call, proto, args, size) \
+ __CHECK_WRITABLE_BUF_SIZE(call, PARAMS(proto), PARAMS(args), size) \
+ __DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), size)
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
@@ -119,9 +122,17 @@ __DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), size)
__BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) \
__DEFINE_EVENT(call, call, PARAMS(proto), PARAMS(args), 0)
+#undef DECLARE_TRACE_WRITABLE
+#define DECLARE_TRACE_WRITABLE(call, proto, args, size) \
+ __CHECK_WRITABLE_BUF_SIZE(call, PARAMS(proto), PARAMS(args), size) \
+ __BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) \
+ __DEFINE_EVENT(call, call, PARAMS(proto), PARAMS(args), size)
+
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#undef DECLARE_TRACE_WRITABLE
#undef DEFINE_EVENT_WRITABLE
+#undef __CHECK_WRITABLE_BUF_SIZE
#undef __DEFINE_EVENT
#undef FIRST
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index bca73e8c8cde..499f5fabd20f 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -1016,31 +1016,32 @@ TRACE_EVENT(afs_dir_check_failed,
__entry->vnode, __entry->off, __entry->i_size)
);
-TRACE_EVENT(afs_page_dirty,
- TP_PROTO(struct afs_vnode *vnode, const char *where, struct page *page),
+TRACE_EVENT(afs_folio_dirty,
+ TP_PROTO(struct afs_vnode *vnode, const char *where, struct folio *folio),
- TP_ARGS(vnode, where, page),
+ TP_ARGS(vnode, where, folio),
TP_STRUCT__entry(
__field(struct afs_vnode *, vnode )
__field(const char *, where )
- __field(pgoff_t, page )
+ __field(pgoff_t, index )
__field(unsigned long, from )
__field(unsigned long, to )
),
TP_fast_assign(
+ unsigned long priv = (unsigned long)folio_get_private(folio);
__entry->vnode = vnode;
__entry->where = where;
- __entry->page = page->index;
- __entry->from = afs_page_dirty_from(page, page->private);
- __entry->to = afs_page_dirty_to(page, page->private);
- __entry->to |= (afs_is_page_dirty_mmapped(page->private) ?
- (1UL << (BITS_PER_LONG - 1)) : 0);
+ __entry->index = folio_index(folio);
+ __entry->from = afs_folio_dirty_from(folio, priv);
+ __entry->to = afs_folio_dirty_to(folio, priv);
+ __entry->to |= (afs_is_folio_dirty_mmapped(priv) ?
+ (1UL << (BITS_PER_LONG - 1)) : 0);
),
TP_printk("vn=%p %lx %s %lx-%lx%s",
- __entry->vnode, __entry->page, __entry->where,
+ __entry->vnode, __entry->index, __entry->where,
__entry->from,
__entry->to & ~(1UL << (BITS_PER_LONG - 1)),
__entry->to & (1UL << (BITS_PER_LONG - 1)) ? " M" : "")
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index cc5ab96a7471..a95daa4d4caa 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -114,7 +114,7 @@ TRACE_EVENT(block_rq_requeue,
*/
TRACE_EVENT(block_rq_complete,
- TP_PROTO(struct request *rq, int error, unsigned int nr_bytes),
+ TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes),
TP_ARGS(rq, error, nr_bytes),
@@ -122,7 +122,7 @@ TRACE_EVENT(block_rq_complete,
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
- __field( int, error )
+ __field( int, error )
__array( char, rwbs, RWBS_LEN )
__dynamic_array( char, cmd, 1 )
),
@@ -131,7 +131,7 @@ TRACE_EVENT(block_rq_complete,
__entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
__entry->sector = blk_rq_pos(rq);
__entry->nr_sector = nr_bytes >> 9;
- __entry->error = error;
+ __entry->error = blk_status_to_errno(error);
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
__get_str(cmd)[0] = '\0';
diff --git a/include/trace/events/devlink.h b/include/trace/events/devlink.h
index 44d8e2981065..2814f188d98c 100644
--- a/include/trace/events/devlink.h
+++ b/include/trace/events/devlink.h
@@ -21,9 +21,9 @@ TRACE_EVENT(devlink_hwmsg,
TP_ARGS(devlink, incoming, type, buf, len),
TP_STRUCT__entry(
- __string(bus_name, devlink->dev->bus->name)
- __string(dev_name, dev_name(devlink->dev))
- __string(driver_name, devlink->dev->driver->name)
+ __string(bus_name, devlink_to_dev(devlink)->bus->name)
+ __string(dev_name, dev_name(devlink_to_dev(devlink)))
+ __string(driver_name, devlink_to_dev(devlink)->driver->name)
__field(bool, incoming)
__field(unsigned long, type)
__dynamic_array(u8, buf, len)
@@ -31,9 +31,9 @@ TRACE_EVENT(devlink_hwmsg,
),
TP_fast_assign(
- __assign_str(bus_name, devlink->dev->bus->name);
- __assign_str(dev_name, dev_name(devlink->dev));
- __assign_str(driver_name, devlink->dev->driver->name);
+ __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
+ __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
+ __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
__entry->incoming = incoming;
__entry->type = type;
memcpy(__get_dynamic_array(buf), buf, len);
@@ -55,17 +55,17 @@ TRACE_EVENT(devlink_hwerr,
TP_ARGS(devlink, err, msg),
TP_STRUCT__entry(
- __string(bus_name, devlink->dev->bus->name)
- __string(dev_name, dev_name(devlink->dev))
- __string(driver_name, devlink->dev->driver->name)
+ __string(bus_name, devlink_to_dev(devlink)->bus->name)
+ __string(dev_name, dev_name(devlink_to_dev(devlink)))
+ __string(driver_name, devlink_to_dev(devlink)->driver->name)
__field(int, err)
__string(msg, msg)
),
TP_fast_assign(
- __assign_str(bus_name, devlink->dev->bus->name);
- __assign_str(dev_name, dev_name(devlink->dev));
- __assign_str(driver_name, devlink->dev->driver->name);
+ __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
+ __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
+ __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
__entry->err = err;
__assign_str(msg, msg);
),
@@ -85,17 +85,17 @@ TRACE_EVENT(devlink_health_report,
TP_ARGS(devlink, reporter_name, msg),
TP_STRUCT__entry(
- __string(bus_name, devlink->dev->bus->name)
- __string(dev_name, dev_name(devlink->dev))
- __string(driver_name, devlink->dev->driver->name)
+ __string(bus_name, devlink_to_dev(devlink)->bus->name)
+ __string(dev_name, dev_name(devlink_to_dev(devlink)))
+ __string(driver_name, devlink_to_dev(devlink)->driver->name)
__string(reporter_name, msg)
__string(msg, msg)
),
TP_fast_assign(
- __assign_str(bus_name, devlink->dev->bus->name);
- __assign_str(dev_name, dev_name(devlink->dev));
- __assign_str(driver_name, devlink->dev->driver->name);
+ __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
+ __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
+ __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
__assign_str(reporter_name, reporter_name);
__assign_str(msg, msg);
),
@@ -116,18 +116,18 @@ TRACE_EVENT(devlink_health_recover_aborted,
TP_ARGS(devlink, reporter_name, health_state, time_since_last_recover),
TP_STRUCT__entry(
- __string(bus_name, devlink->dev->bus->name)
- __string(dev_name, dev_name(devlink->dev))
- __string(driver_name, devlink->dev->driver->name)
+ __string(bus_name, devlink_to_dev(devlink)->bus->name)
+ __string(dev_name, dev_name(devlink_to_dev(devlink)))
+ __string(driver_name, devlink_to_dev(devlink)->driver->name)
__string(reporter_name, reporter_name)
__field(bool, health_state)
__field(u64, time_since_last_recover)
),
TP_fast_assign(
- __assign_str(bus_name, devlink->dev->bus->name);
- __assign_str(dev_name, dev_name(devlink->dev));
- __assign_str(driver_name, devlink->dev->driver->name);
+ __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
+ __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
+ __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
__assign_str(reporter_name, reporter_name);
__entry->health_state = health_state;
__entry->time_since_last_recover = time_since_last_recover;
@@ -150,17 +150,17 @@ TRACE_EVENT(devlink_health_reporter_state_update,
TP_ARGS(devlink, reporter_name, new_state),
TP_STRUCT__entry(
- __string(bus_name, devlink->dev->bus->name)
- __string(dev_name, dev_name(devlink->dev))
- __string(driver_name, devlink->dev->driver->name)
+ __string(bus_name, devlink_to_dev(devlink)->bus->name)
+ __string(dev_name, dev_name(devlink_to_dev(devlink)))
+ __string(driver_name, devlink_to_dev(devlink)->driver->name)
__string(reporter_name, reporter_name)
__field(u8, new_state)
),
TP_fast_assign(
- __assign_str(bus_name, devlink->dev->bus->name);
- __assign_str(dev_name, dev_name(devlink->dev));
- __assign_str(driver_name, devlink->dev->driver->name);
+ __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
+ __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
+ __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
__assign_str(reporter_name, reporter_name);
__entry->new_state = new_state;
),
@@ -181,9 +181,9 @@ TRACE_EVENT(devlink_trap_report,
TP_ARGS(devlink, skb, metadata),
TP_STRUCT__entry(
- __string(bus_name, devlink->dev->bus->name)
- __string(dev_name, dev_name(devlink->dev))
- __string(driver_name, devlink->dev->driver->name)
+ __string(bus_name, devlink_to_dev(devlink)->bus->name)
+ __string(dev_name, dev_name(devlink_to_dev(devlink)))
+ __string(driver_name, devlink_to_dev(devlink)->driver->name)
__string(trap_name, metadata->trap_name)
__string(trap_group_name, metadata->trap_group_name)
__dynamic_array(char, input_dev_name, IFNAMSIZ)
@@ -192,9 +192,9 @@ TRACE_EVENT(devlink_trap_report,
TP_fast_assign(
struct net_device *input_dev = metadata->input_dev;
- __assign_str(bus_name, devlink->dev->bus->name);
- __assign_str(dev_name, dev_name(devlink->dev));
- __assign_str(driver_name, devlink->dev->driver->name);
+ __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
+ __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
+ __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
__assign_str(trap_name, metadata->trap_name);
__assign_str(trap_group_name, metadata->trap_group_name);
__assign_str(input_dev_name,
diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h
index db4f2cec8360..16ae7b666810 100644
--- a/include/trace/events/erofs.h
+++ b/include/trace/events/erofs.h
@@ -24,7 +24,7 @@ struct erofs_map_blocks;
#define show_mflags(flags) __print_flags(flags, "", \
{ EROFS_MAP_MAPPED, "M" }, \
{ EROFS_MAP_META, "I" }, \
- { EROFS_MAP_ZIPPED, "Z" })
+ { EROFS_MAP_ENCODED, "E" })
TRACE_EVENT(erofs_lookup,
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 4e881d91c874..f8cb916f3595 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -570,9 +570,10 @@ TRACE_EVENT(f2fs_file_write_iter,
);
TRACE_EVENT(f2fs_map_blocks,
- TP_PROTO(struct inode *inode, struct f2fs_map_blocks *map, int ret),
+ TP_PROTO(struct inode *inode, struct f2fs_map_blocks *map,
+ int create, int flag, int ret),
- TP_ARGS(inode, map, ret),
+ TP_ARGS(inode, map, create, flag, ret),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -583,11 +584,14 @@ TRACE_EVENT(f2fs_map_blocks,
__field(unsigned int, m_flags)
__field(int, m_seg_type)
__field(bool, m_may_create)
+ __field(bool, m_multidev_dio)
+ __field(int, create)
+ __field(int, flag)
__field(int, ret)
),
TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
+ __entry->dev = map->m_bdev->bd_dev;
__entry->ino = inode->i_ino;
__entry->m_lblk = map->m_lblk;
__entry->m_pblk = map->m_pblk;
@@ -595,12 +599,16 @@ TRACE_EVENT(f2fs_map_blocks,
__entry->m_flags = map->m_flags;
__entry->m_seg_type = map->m_seg_type;
__entry->m_may_create = map->m_may_create;
+ __entry->m_multidev_dio = map->m_multidev_dio;
+ __entry->create = create;
+ __entry->flag = flag;
__entry->ret = ret;
),
TP_printk("dev = (%d,%d), ino = %lu, file offset = %llu, "
- "start blkaddr = 0x%llx, len = 0x%llx, flags = %u,"
- "seg_type = %d, may_create = %d, err = %d",
+ "start blkaddr = 0x%llx, len = 0x%llx, flags = %u, "
+ "seg_type = %d, may_create = %d, multidevice = %d, "
+ "create = %d, flag = %d, err = %d",
show_dev_ino(__entry),
(unsigned long long)__entry->m_lblk,
(unsigned long long)__entry->m_pblk,
@@ -608,6 +616,9 @@ TRACE_EVENT(f2fs_map_blocks,
__entry->m_flags,
__entry->m_seg_type,
__entry->m_may_create,
+ __entry->m_multidev_dio,
+ __entry->create,
+ __entry->flag,
__entry->ret)
);
@@ -807,20 +818,20 @@ TRACE_EVENT(f2fs_lookup_start,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
- __field(const char *, name)
+ __string(name, dentry->d_name.name)
__field(unsigned int, flags)
),
TP_fast_assign(
__entry->dev = dir->i_sb->s_dev;
__entry->ino = dir->i_ino;
- __entry->name = dentry->d_name.name;
+ __assign_str(name, dentry->d_name.name);
__entry->flags = flags;
),
TP_printk("dev = (%d,%d), pino = %lu, name:%s, flags:%u",
show_dev_ino(__entry),
- __entry->name,
+ __get_str(name),
__entry->flags)
);
@@ -834,7 +845,7 @@ TRACE_EVENT(f2fs_lookup_end,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
- __field(const char *, name)
+ __string(name, dentry->d_name.name)
__field(nid_t, cino)
__field(int, err)
),
@@ -842,14 +853,14 @@ TRACE_EVENT(f2fs_lookup_end,
TP_fast_assign(
__entry->dev = dir->i_sb->s_dev;
__entry->ino = dir->i_ino;
- __entry->name = dentry->d_name.name;
+ __assign_str(name, dentry->d_name.name);
__entry->cino = ino;
__entry->err = err;
),
TP_printk("dev = (%d,%d), pino = %lu, name:%s, ino:%u, err:%d",
show_dev_ino(__entry),
- __entry->name,
+ __get_str(name),
__entry->cino,
__entry->err)
);
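The f2fs lookup events above switch from storing a raw const char * in the ring buffer to copying the string with __string()/__assign_str(); a stored pointer may dangle by the time the event is read, while the copy is captured at trace time. The shape of the pattern, as a sketch with a hypothetical event (TRACE_SYSTEM boilerplate omitted):

TRACE_EVENT(myfs_lookup,
	TP_PROTO(const struct dentry *dentry),
	TP_ARGS(dentry),
	TP_STRUCT__entry(
		__string(name, dentry->d_name.name)	/* copied, not referenced */
	),
	TP_fast_assign(
		__assign_str(name, dentry->d_name.name);
	),
	TP_printk("name:%s", __get_str(name))
);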
diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
new file mode 100644
index 000000000000..738b97f22f36
--- /dev/null
+++ b/include/trace/events/fs.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Display helpers for generic filesystem items
+ *
+ * Author: Chuck Lever <chuck.lever@oracle.com>
+ *
+ * Copyright (c) 2020, Oracle and/or its affiliates.
+ */
+
+#include <linux/fs.h>
+
+#define show_fs_dirent_type(x) \
+ __print_symbolic(x, \
+ { DT_UNKNOWN, "UNKNOWN" }, \
+ { DT_FIFO, "FIFO" }, \
+ { DT_CHR, "CHR" }, \
+ { DT_DIR, "DIR" }, \
+ { DT_BLK, "BLK" }, \
+ { DT_REG, "REG" }, \
+ { DT_LNK, "LNK" }, \
+ { DT_SOCK, "SOCK" }, \
+ { DT_WHT, "WHT" })
+
+#define show_fs_fcntl_open_flags(x) \
+ __print_flags(x, "|", \
+ { O_WRONLY, "O_WRONLY" }, \
+ { O_RDWR, "O_RDWR" }, \
+ { O_CREAT, "O_CREAT" }, \
+ { O_EXCL, "O_EXCL" }, \
+ { O_NOCTTY, "O_NOCTTY" }, \
+ { O_TRUNC, "O_TRUNC" }, \
+ { O_APPEND, "O_APPEND" }, \
+ { O_NONBLOCK, "O_NONBLOCK" }, \
+ { O_DSYNC, "O_DSYNC" }, \
+ { O_DIRECT, "O_DIRECT" }, \
+ { O_LARGEFILE, "O_LARGEFILE" }, \
+ { O_DIRECTORY, "O_DIRECTORY" }, \
+ { O_NOFOLLOW, "O_NOFOLLOW" }, \
+ { O_NOATIME, "O_NOATIME" }, \
+ { O_CLOEXEC, "O_CLOEXEC" })
+
+#define __fmode_flag(x) { (__force unsigned long)FMODE_##x, #x }
+#define show_fs_fmode_flags(x) \
+ __print_flags(x, "|", \
+ __fmode_flag(READ), \
+ __fmode_flag(WRITE), \
+ __fmode_flag(EXEC))
+
+#ifdef CONFIG_64BIT
+#define show_fs_fcntl_cmd(x) \
+ __print_symbolic(x, \
+ { F_DUPFD, "DUPFD" }, \
+ { F_GETFD, "GETFD" }, \
+ { F_SETFD, "SETFD" }, \
+ { F_GETFL, "GETFL" }, \
+ { F_SETFL, "SETFL" }, \
+ { F_GETLK, "GETLK" }, \
+ { F_SETLK, "SETLK" }, \
+ { F_SETLKW, "SETLKW" }, \
+ { F_SETOWN, "SETOWN" }, \
+ { F_GETOWN, "GETOWN" }, \
+ { F_SETSIG, "SETSIG" }, \
+ { F_GETSIG, "GETSIG" }, \
+ { F_SETOWN_EX, "SETOWN_EX" }, \
+ { F_GETOWN_EX, "GETOWN_EX" }, \
+ { F_GETOWNER_UIDS, "GETOWNER_UIDS" }, \
+ { F_OFD_GETLK, "OFD_GETLK" }, \
+ { F_OFD_SETLK, "OFD_SETLK" }, \
+ { F_OFD_SETLKW, "OFD_SETLKW" })
+#else /* CONFIG_64BIT */
+#define show_fs_fcntl_cmd(x) \
+ __print_symbolic(x, \
+ { F_DUPFD, "DUPFD" }, \
+ { F_GETFD, "GETFD" }, \
+ { F_SETFD, "SETFD" }, \
+ { F_GETFL, "GETFL" }, \
+ { F_SETFL, "SETFL" }, \
+ { F_GETLK, "GETLK" }, \
+ { F_SETLK, "SETLK" }, \
+ { F_SETLKW, "SETLKW" }, \
+ { F_SETOWN, "SETOWN" }, \
+ { F_GETOWN, "GETOWN" }, \
+ { F_SETSIG, "SETSIG" }, \
+ { F_GETSIG, "GETSIG" }, \
+ { F_GETLK64, "GETLK64" }, \
+ { F_SETLK64, "SETLK64" }, \
+ { F_SETLKW64, "SETLKW64" }, \
+ { F_SETOWN_EX, "SETOWN_EX" }, \
+ { F_GETOWN_EX, "GETOWN_EX" }, \
+ { F_GETOWNER_UIDS, "GETOWNER_UIDS" }, \
+ { F_OFD_GETLK, "OFD_GETLK" }, \
+ { F_OFD_SETLK, "OFD_SETLK" }, \
+ { F_OFD_SETLKW, "OFD_SETLKW" })
+#endif /* CONFIG_64BIT */
+
+#define show_fs_fcntl_lock_type(x) \
+ __print_symbolic(x, \
+ { F_RDLCK, "RDLCK" }, \
+ { F_WRLCK, "WRLCK" }, \
+ { F_UNLCK, "UNLCK" })
+
+#define show_fs_lookup_flags(flags) \
+ __print_flags(flags, "|", \
+ { LOOKUP_FOLLOW, "FOLLOW" }, \
+ { LOOKUP_DIRECTORY, "DIRECTORY" }, \
+ { LOOKUP_AUTOMOUNT, "AUTOMOUNT" }, \
+ { LOOKUP_EMPTY, "EMPTY" }, \
+ { LOOKUP_DOWN, "DOWN" }, \
+ { LOOKUP_MOUNTPOINT, "MOUNTPOINT" }, \
+ { LOOKUP_REVAL, "REVAL" }, \
+ { LOOKUP_RCU, "RCU" }, \
+ { LOOKUP_OPEN, "OPEN" }, \
+ { LOOKUP_CREATE, "CREATE" }, \
+ { LOOKUP_EXCL, "EXCL" }, \
+ { LOOKUP_RENAME_TARGET, "RENAME_TARGET" }, \
+ { LOOKUP_PARENT, "PARENT" }, \
+ { LOOKUP_NO_SYMLINKS, "NO_SYMLINKS" }, \
+ { LOOKUP_NO_MAGICLINKS, "NO_MAGICLINKS" }, \
+ { LOOKUP_NO_XDEV, "NO_XDEV" }, \
+ { LOOKUP_BENEATH, "BENEATH" }, \
+ { LOOKUP_IN_ROOT, "IN_ROOT" }, \
+ { LOOKUP_CACHED, "CACHED" })
diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h
index 0dd30de00e5b..7346f0164cf4 100644
--- a/include/trace/events/io_uring.h
+++ b/include/trace/events/io_uring.h
@@ -6,6 +6,7 @@
#define _TRACE_IO_URING_H
#include <linux/tracepoint.h>
+#include <uapi/linux/io_uring.h>
struct io_wq_work;
@@ -497,6 +498,66 @@ TRACE_EVENT(io_uring_task_run,
(unsigned long long) __entry->user_data)
);
+/*
+ * io_uring_req_failed - called when an sqe is errored during submission
+ *
+ * @sqe: pointer to the io_uring_sqe that failed
+ * @error: error it failed with
+ *
+ * Allows easier diagnosing of malformed requests in production systems.
+ */
+TRACE_EVENT(io_uring_req_failed,
+
+ TP_PROTO(const struct io_uring_sqe *sqe, int error),
+
+ TP_ARGS(sqe, error),
+
+ TP_STRUCT__entry (
+ __field( u8, opcode )
+ __field( u8, flags )
+ __field( u8, ioprio )
+ __field( u64, off )
+ __field( u64, addr )
+ __field( u32, len )
+ __field( u32, op_flags )
+ __field( u64, user_data )
+ __field( u16, buf_index )
+ __field( u16, personality )
+ __field( u32, file_index )
+ __field( u64, pad1 )
+ __field( u64, pad2 )
+ __field( int, error )
+ ),
+
+ TP_fast_assign(
+ __entry->opcode = sqe->opcode;
+ __entry->flags = sqe->flags;
+ __entry->ioprio = sqe->ioprio;
+ __entry->off = sqe->off;
+ __entry->addr = sqe->addr;
+ __entry->len = sqe->len;
+ __entry->op_flags = sqe->rw_flags;
+ __entry->user_data = sqe->user_data;
+ __entry->buf_index = sqe->buf_index;
+ __entry->personality = sqe->personality;
+ __entry->file_index = sqe->file_index;
+ __entry->pad1 = sqe->__pad2[0];
+ __entry->pad2 = sqe->__pad2[1];
+ __entry->error = error;
+ ),
+
+ TP_printk("op %d, flags=0x%x, prio=%d, off=%llu, addr=%llu, "
+ "len=%u, rw_flags=0x%x, user_data=0x%llx, buf_index=%d, "
+ "personality=%d, file_index=%d, pad=0x%llx/%llx, error=%d",
+ __entry->opcode, __entry->flags, __entry->ioprio,
+ (unsigned long long)__entry->off,
+ (unsigned long long) __entry->addr, __entry->len,
+ __entry->op_flags, (unsigned long long) __entry->user_data,
+ __entry->buf_index, __entry->personality, __entry->file_index,
+ (unsigned long long) __entry->pad1,
+ (unsigned long long) __entry->pad2, __entry->error)
+);
+
#endif /* _TRACE_IO_URING_H */
/* This part must be outside protection */
diff --git a/include/trace/events/mctp.h b/include/trace/events/mctp.h
new file mode 100644
index 000000000000..175b057c507f
--- /dev/null
+++ b/include/trace/events/mctp.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mctp
+
+#if !defined(_TRACE_MCTP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MCTP_H
+
+#include <linux/tracepoint.h>
+
+#ifndef __TRACE_MCTP_ENUMS
+#define __TRACE_MCTP_ENUMS
+enum {
+ MCTP_TRACE_KEY_TIMEOUT,
+ MCTP_TRACE_KEY_REPLIED,
+ MCTP_TRACE_KEY_INVALIDATED,
+ MCTP_TRACE_KEY_CLOSED,
+};
+#endif /* __TRACE_MCTP_ENUMS */
+
+TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_TIMEOUT);
+TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_REPLIED);
+TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_INVALIDATED);
+TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_CLOSED);
+
+TRACE_EVENT(mctp_key_acquire,
+ TP_PROTO(const struct mctp_sk_key *key),
+ TP_ARGS(key),
+ TP_STRUCT__entry(
+ __field(__u8, paddr)
+ __field(__u8, laddr)
+ __field(__u8, tag)
+ ),
+ TP_fast_assign(
+ __entry->paddr = key->peer_addr;
+ __entry->laddr = key->local_addr;
+ __entry->tag = key->tag;
+ ),
+ TP_printk("local %d, peer %d, tag %1x",
+ __entry->laddr,
+ __entry->paddr,
+ __entry->tag
+ )
+);
+
+TRACE_EVENT(mctp_key_release,
+ TP_PROTO(const struct mctp_sk_key *key, int reason),
+ TP_ARGS(key, reason),
+ TP_STRUCT__entry(
+ __field(__u8, paddr)
+ __field(__u8, laddr)
+ __field(__u8, tag)
+ __field(int, reason)
+ ),
+ TP_fast_assign(
+ __entry->paddr = key->peer_addr;
+ __entry->laddr = key->local_addr;
+ __entry->tag = key->tag;
+ __entry->reason = reason;
+ ),
+ TP_printk("local %d, peer %d, tag %1x %s",
+ __entry->laddr,
+ __entry->paddr,
+ __entry->tag,
+ __print_symbolic(__entry->reason,
+ { MCTP_TRACE_KEY_TIMEOUT, "timeout" },
+ { MCTP_TRACE_KEY_REPLIED, "replied" },
+ { MCTP_TRACE_KEY_INVALIDATED, "invalidated" },
+ { MCTP_TRACE_KEY_CLOSED, "closed" })
+ )
+);
+
+#endif
+
+#include <trace/define_trace.h>
diff --git a/include/trace/events/mmap_lock.h b/include/trace/events/mmap_lock.h
index 0abff67b96f0..14db8044c1ff 100644
--- a/include/trace/events/mmap_lock.h
+++ b/include/trace/events/mmap_lock.h
@@ -13,7 +13,7 @@ struct mm_struct;
extern int trace_mmap_lock_reg(void);
extern void trace_mmap_lock_unreg(void);
-TRACE_EVENT_FN(mmap_lock_start_locking,
+DECLARE_EVENT_CLASS(mmap_lock,
TP_PROTO(struct mm_struct *mm, const char *memcg_path, bool write),
@@ -32,15 +32,23 @@ TRACE_EVENT_FN(mmap_lock_start_locking,
),
TP_printk(
- "mm=%p memcg_path=%s write=%s\n",
+ "mm=%p memcg_path=%s write=%s",
__entry->mm,
__get_str(memcg_path),
__entry->write ? "true" : "false"
- ),
-
- trace_mmap_lock_reg, trace_mmap_lock_unreg
+ )
);
+#define DEFINE_MMAP_LOCK_EVENT(name) \
+ DEFINE_EVENT_FN(mmap_lock, name, \
+ TP_PROTO(struct mm_struct *mm, const char *memcg_path, \
+ bool write), \
+ TP_ARGS(mm, memcg_path, write), \
+ trace_mmap_lock_reg, trace_mmap_lock_unreg)
+
+DEFINE_MMAP_LOCK_EVENT(mmap_lock_start_locking);
+DEFINE_MMAP_LOCK_EVENT(mmap_lock_released);
+
TRACE_EVENT_FN(mmap_lock_acquire_returned,
TP_PROTO(struct mm_struct *mm, const char *memcg_path, bool write,
@@ -63,7 +71,7 @@ TRACE_EVENT_FN(mmap_lock_acquire_returned,
),
TP_printk(
- "mm=%p memcg_path=%s write=%s success=%s\n",
+ "mm=%p memcg_path=%s write=%s success=%s",
__entry->mm,
__get_str(memcg_path),
__entry->write ? "true" : "false",
@@ -73,34 +81,6 @@ TRACE_EVENT_FN(mmap_lock_acquire_returned,
trace_mmap_lock_reg, trace_mmap_lock_unreg
);
-TRACE_EVENT_FN(mmap_lock_released,
-
- TP_PROTO(struct mm_struct *mm, const char *memcg_path, bool write),
-
- TP_ARGS(mm, memcg_path, write),
-
- TP_STRUCT__entry(
- __field(struct mm_struct *, mm)
- __string(memcg_path, memcg_path)
- __field(bool, write)
- ),
-
- TP_fast_assign(
- __entry->mm = mm;
- __assign_str(memcg_path, memcg_path);
- __entry->write = write;
- ),
-
- TP_printk(
- "mm=%p memcg_path=%s write=%s\n",
- __entry->mm,
- __get_str(memcg_path),
- __entry->write ? "true" : "false"
- ),
-
- trace_mmap_lock_reg, trace_mmap_lock_unreg
-);
-
#endif /* _TRACE_MMAP_LOCK_H */
/* This part must be outside protection */
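The mmap_lock refactor above is the standard deduplication for tracepoints that share a signature: one DECLARE_EVENT_CLASS carries the fields and format, and each event becomes a one-line definition (here DEFINE_EVENT_FN, to keep the reg/unreg callbacks). The generic shape, sketched with hypothetical names:

DECLARE_EVENT_CLASS(myclass,
	TP_PROTO(int value),
	TP_ARGS(value),
	TP_STRUCT__entry(__field(int, value)),
	TP_fast_assign(__entry->value = value;),
	TP_printk("value=%d", __entry->value)
);

DEFINE_EVENT(myclass, myevent_start, TP_PROTO(int value), TP_ARGS(value));
DEFINE_EVENT(myclass, myevent_done, TP_PROTO(int value), TP_ARGS(value));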
diff --git a/include/trace/events/nfs.h b/include/trace/events/nfs.h
new file mode 100644
index 000000000000..09ffdbb04134
--- /dev/null
+++ b/include/trace/events/nfs.h
@@ -0,0 +1,375 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Display helpers for NFS protocol elements
+ *
+ * Author: Chuck Lever <chuck.lever@oracle.com>
+ *
+ * Copyright (c) 2020, Oracle and/or its affiliates.
+ */
+
+#include <linux/nfs.h>
+#include <linux/nfs4.h>
+#include <uapi/linux/nfs.h>
+
+TRACE_DEFINE_ENUM(NFS_OK);
+TRACE_DEFINE_ENUM(NFSERR_PERM);
+TRACE_DEFINE_ENUM(NFSERR_NOENT);
+TRACE_DEFINE_ENUM(NFSERR_IO);
+TRACE_DEFINE_ENUM(NFSERR_NXIO);
+TRACE_DEFINE_ENUM(NFSERR_EAGAIN);
+TRACE_DEFINE_ENUM(NFSERR_ACCES);
+TRACE_DEFINE_ENUM(NFSERR_EXIST);
+TRACE_DEFINE_ENUM(NFSERR_XDEV);
+TRACE_DEFINE_ENUM(NFSERR_NODEV);
+TRACE_DEFINE_ENUM(NFSERR_NOTDIR);
+TRACE_DEFINE_ENUM(NFSERR_ISDIR);
+TRACE_DEFINE_ENUM(NFSERR_INVAL);
+TRACE_DEFINE_ENUM(NFSERR_FBIG);
+TRACE_DEFINE_ENUM(NFSERR_NOSPC);
+TRACE_DEFINE_ENUM(NFSERR_ROFS);
+TRACE_DEFINE_ENUM(NFSERR_MLINK);
+TRACE_DEFINE_ENUM(NFSERR_OPNOTSUPP);
+TRACE_DEFINE_ENUM(NFSERR_NAMETOOLONG);
+TRACE_DEFINE_ENUM(NFSERR_NOTEMPTY);
+TRACE_DEFINE_ENUM(NFSERR_DQUOT);
+TRACE_DEFINE_ENUM(NFSERR_STALE);
+TRACE_DEFINE_ENUM(NFSERR_REMOTE);
+TRACE_DEFINE_ENUM(NFSERR_WFLUSH);
+TRACE_DEFINE_ENUM(NFSERR_BADHANDLE);
+TRACE_DEFINE_ENUM(NFSERR_NOT_SYNC);
+TRACE_DEFINE_ENUM(NFSERR_BAD_COOKIE);
+TRACE_DEFINE_ENUM(NFSERR_NOTSUPP);
+TRACE_DEFINE_ENUM(NFSERR_TOOSMALL);
+TRACE_DEFINE_ENUM(NFSERR_SERVERFAULT);
+TRACE_DEFINE_ENUM(NFSERR_BADTYPE);
+TRACE_DEFINE_ENUM(NFSERR_JUKEBOX);
+
+#define show_nfs_status(x) \
+ __print_symbolic(x, \
+ { NFS_OK, "OK" }, \
+ { NFSERR_PERM, "PERM" }, \
+ { NFSERR_NOENT, "NOENT" }, \
+ { NFSERR_IO, "IO" }, \
+ { NFSERR_NXIO, "NXIO" }, \
+ { ECHILD, "CHILD" }, \
+ { NFSERR_EAGAIN, "AGAIN" }, \
+ { NFSERR_ACCES, "ACCES" }, \
+ { NFSERR_EXIST, "EXIST" }, \
+ { NFSERR_XDEV, "XDEV" }, \
+ { NFSERR_NODEV, "NODEV" }, \
+ { NFSERR_NOTDIR, "NOTDIR" }, \
+ { NFSERR_ISDIR, "ISDIR" }, \
+ { NFSERR_INVAL, "INVAL" }, \
+ { NFSERR_FBIG, "FBIG" }, \
+ { NFSERR_NOSPC, "NOSPC" }, \
+ { NFSERR_ROFS, "ROFS" }, \
+ { NFSERR_MLINK, "MLINK" }, \
+ { NFSERR_OPNOTSUPP, "OPNOTSUPP" }, \
+ { NFSERR_NAMETOOLONG, "NAMETOOLONG" }, \
+ { NFSERR_NOTEMPTY, "NOTEMPTY" }, \
+ { NFSERR_DQUOT, "DQUOT" }, \
+ { NFSERR_STALE, "STALE" }, \
+ { NFSERR_REMOTE, "REMOTE" }, \
+ { NFSERR_WFLUSH, "WFLUSH" }, \
+ { NFSERR_BADHANDLE, "BADHANDLE" }, \
+ { NFSERR_NOT_SYNC, "NOTSYNC" }, \
+ { NFSERR_BAD_COOKIE, "BADCOOKIE" }, \
+ { NFSERR_NOTSUPP, "NOTSUPP" }, \
+ { NFSERR_TOOSMALL, "TOOSMALL" }, \
+ { NFSERR_SERVERFAULT, "REMOTEIO" }, \
+ { NFSERR_BADTYPE, "BADTYPE" }, \
+ { NFSERR_JUKEBOX, "JUKEBOX" })
+
+TRACE_DEFINE_ENUM(NFS_UNSTABLE);
+TRACE_DEFINE_ENUM(NFS_DATA_SYNC);
+TRACE_DEFINE_ENUM(NFS_FILE_SYNC);
+
+#define show_nfs_stable_how(x) \
+ __print_symbolic(x, \
+ { NFS_UNSTABLE, "UNSTABLE" }, \
+ { NFS_DATA_SYNC, "DATA_SYNC" }, \
+ { NFS_FILE_SYNC, "FILE_SYNC" })
+
+TRACE_DEFINE_ENUM(NFS4_OK);
+TRACE_DEFINE_ENUM(NFS4ERR_ACCESS);
+TRACE_DEFINE_ENUM(NFS4ERR_ATTRNOTSUPP);
+TRACE_DEFINE_ENUM(NFS4ERR_ADMIN_REVOKED);
+TRACE_DEFINE_ENUM(NFS4ERR_BACK_CHAN_BUSY);
+TRACE_DEFINE_ENUM(NFS4ERR_BADCHAR);
+TRACE_DEFINE_ENUM(NFS4ERR_BADHANDLE);
+TRACE_DEFINE_ENUM(NFS4ERR_BADIOMODE);
+TRACE_DEFINE_ENUM(NFS4ERR_BADLAYOUT);
+TRACE_DEFINE_ENUM(NFS4ERR_BADLABEL);
+TRACE_DEFINE_ENUM(NFS4ERR_BADNAME);
+TRACE_DEFINE_ENUM(NFS4ERR_BADOWNER);
+TRACE_DEFINE_ENUM(NFS4ERR_BADSESSION);
+TRACE_DEFINE_ENUM(NFS4ERR_BADSLOT);
+TRACE_DEFINE_ENUM(NFS4ERR_BADTYPE);
+TRACE_DEFINE_ENUM(NFS4ERR_BADXDR);
+TRACE_DEFINE_ENUM(NFS4ERR_BAD_COOKIE);
+TRACE_DEFINE_ENUM(NFS4ERR_BAD_HIGH_SLOT);
+TRACE_DEFINE_ENUM(NFS4ERR_BAD_RANGE);
+TRACE_DEFINE_ENUM(NFS4ERR_BAD_SEQID);
+TRACE_DEFINE_ENUM(NFS4ERR_BAD_SESSION_DIGEST);
+TRACE_DEFINE_ENUM(NFS4ERR_BAD_STATEID);
+TRACE_DEFINE_ENUM(NFS4ERR_CB_PATH_DOWN);
+TRACE_DEFINE_ENUM(NFS4ERR_CLID_INUSE);
+TRACE_DEFINE_ENUM(NFS4ERR_CLIENTID_BUSY);
+TRACE_DEFINE_ENUM(NFS4ERR_COMPLETE_ALREADY);
+TRACE_DEFINE_ENUM(NFS4ERR_CONN_NOT_BOUND_TO_SESSION);
+TRACE_DEFINE_ENUM(NFS4ERR_DEADLOCK);
+TRACE_DEFINE_ENUM(NFS4ERR_DEADSESSION);
+TRACE_DEFINE_ENUM(NFS4ERR_DELAY);
+TRACE_DEFINE_ENUM(NFS4ERR_DELEG_ALREADY_WANTED);
+TRACE_DEFINE_ENUM(NFS4ERR_DELEG_REVOKED);
+TRACE_DEFINE_ENUM(NFS4ERR_DENIED);
+TRACE_DEFINE_ENUM(NFS4ERR_DIRDELEG_UNAVAIL);
+TRACE_DEFINE_ENUM(NFS4ERR_DQUOT);
+TRACE_DEFINE_ENUM(NFS4ERR_ENCR_ALG_UNSUPP);
+TRACE_DEFINE_ENUM(NFS4ERR_EXIST);
+TRACE_DEFINE_ENUM(NFS4ERR_EXPIRED);
+TRACE_DEFINE_ENUM(NFS4ERR_FBIG);
+TRACE_DEFINE_ENUM(NFS4ERR_FHEXPIRED);
+TRACE_DEFINE_ENUM(NFS4ERR_FILE_OPEN);
+TRACE_DEFINE_ENUM(NFS4ERR_GRACE);
+TRACE_DEFINE_ENUM(NFS4ERR_HASH_ALG_UNSUPP);
+TRACE_DEFINE_ENUM(NFS4ERR_INVAL);
+TRACE_DEFINE_ENUM(NFS4ERR_IO);
+TRACE_DEFINE_ENUM(NFS4ERR_ISDIR);
+TRACE_DEFINE_ENUM(NFS4ERR_LAYOUTTRYLATER);
+TRACE_DEFINE_ENUM(NFS4ERR_LAYOUTUNAVAILABLE);
+TRACE_DEFINE_ENUM(NFS4ERR_LEASE_MOVED);
+TRACE_DEFINE_ENUM(NFS4ERR_LOCKED);
+TRACE_DEFINE_ENUM(NFS4ERR_LOCKS_HELD);
+TRACE_DEFINE_ENUM(NFS4ERR_LOCK_RANGE);
+TRACE_DEFINE_ENUM(NFS4ERR_MINOR_VERS_MISMATCH);
+TRACE_DEFINE_ENUM(NFS4ERR_MLINK);
+TRACE_DEFINE_ENUM(NFS4ERR_MOVED);
+TRACE_DEFINE_ENUM(NFS4ERR_NAMETOOLONG);
+TRACE_DEFINE_ENUM(NFS4ERR_NOENT);
+TRACE_DEFINE_ENUM(NFS4ERR_NOFILEHANDLE);
+TRACE_DEFINE_ENUM(NFS4ERR_NOMATCHING_LAYOUT);
+TRACE_DEFINE_ENUM(NFS4ERR_NOSPC);
+TRACE_DEFINE_ENUM(NFS4ERR_NOTDIR);
+TRACE_DEFINE_ENUM(NFS4ERR_NOTEMPTY);
+TRACE_DEFINE_ENUM(NFS4ERR_NOTSUPP);
+TRACE_DEFINE_ENUM(NFS4ERR_NOT_ONLY_OP);
+TRACE_DEFINE_ENUM(NFS4ERR_NOT_SAME);
+TRACE_DEFINE_ENUM(NFS4ERR_NO_GRACE);
+TRACE_DEFINE_ENUM(NFS4ERR_NXIO);
+TRACE_DEFINE_ENUM(NFS4ERR_OLD_STATEID);
+TRACE_DEFINE_ENUM(NFS4ERR_OPENMODE);
+TRACE_DEFINE_ENUM(NFS4ERR_OP_ILLEGAL);
+TRACE_DEFINE_ENUM(NFS4ERR_OP_NOT_IN_SESSION);
+TRACE_DEFINE_ENUM(NFS4ERR_PERM);
+TRACE_DEFINE_ENUM(NFS4ERR_PNFS_IO_HOLE);
+TRACE_DEFINE_ENUM(NFS4ERR_PNFS_NO_LAYOUT);
+TRACE_DEFINE_ENUM(NFS4ERR_RECALLCONFLICT);
+TRACE_DEFINE_ENUM(NFS4ERR_RECLAIM_BAD);
+TRACE_DEFINE_ENUM(NFS4ERR_RECLAIM_CONFLICT);
+TRACE_DEFINE_ENUM(NFS4ERR_REJECT_DELEG);
+TRACE_DEFINE_ENUM(NFS4ERR_REP_TOO_BIG);
+TRACE_DEFINE_ENUM(NFS4ERR_REP_TOO_BIG_TO_CACHE);
+TRACE_DEFINE_ENUM(NFS4ERR_REQ_TOO_BIG);
+TRACE_DEFINE_ENUM(NFS4ERR_RESOURCE);
+TRACE_DEFINE_ENUM(NFS4ERR_RESTOREFH);
+TRACE_DEFINE_ENUM(NFS4ERR_RETRY_UNCACHED_REP);
+TRACE_DEFINE_ENUM(NFS4ERR_RETURNCONFLICT);
+TRACE_DEFINE_ENUM(NFS4ERR_ROFS);
+TRACE_DEFINE_ENUM(NFS4ERR_SAME);
+TRACE_DEFINE_ENUM(NFS4ERR_SHARE_DENIED);
+TRACE_DEFINE_ENUM(NFS4ERR_SEQUENCE_POS);
+TRACE_DEFINE_ENUM(NFS4ERR_SEQ_FALSE_RETRY);
+TRACE_DEFINE_ENUM(NFS4ERR_SEQ_MISORDERED);
+TRACE_DEFINE_ENUM(NFS4ERR_SERVERFAULT);
+TRACE_DEFINE_ENUM(NFS4ERR_STALE);
+TRACE_DEFINE_ENUM(NFS4ERR_STALE_CLIENTID);
+TRACE_DEFINE_ENUM(NFS4ERR_STALE_STATEID);
+TRACE_DEFINE_ENUM(NFS4ERR_SYMLINK);
+TRACE_DEFINE_ENUM(NFS4ERR_TOOSMALL);
+TRACE_DEFINE_ENUM(NFS4ERR_TOO_MANY_OPS);
+TRACE_DEFINE_ENUM(NFS4ERR_UNKNOWN_LAYOUTTYPE);
+TRACE_DEFINE_ENUM(NFS4ERR_UNSAFE_COMPOUND);
+TRACE_DEFINE_ENUM(NFS4ERR_WRONGSEC);
+TRACE_DEFINE_ENUM(NFS4ERR_WRONG_CRED);
+TRACE_DEFINE_ENUM(NFS4ERR_WRONG_TYPE);
+TRACE_DEFINE_ENUM(NFS4ERR_XDEV);
+
+TRACE_DEFINE_ENUM(NFS4ERR_RESET_TO_MDS);
+TRACE_DEFINE_ENUM(NFS4ERR_RESET_TO_PNFS);
+
+#define show_nfs4_status(x) \
+ __print_symbolic(x, \
+ { NFS4_OK, "OK" }, \
+ { EPERM, "EPERM" }, \
+ { ENOENT, "ENOENT" }, \
+ { EIO, "EIO" }, \
+ { ENXIO, "ENXIO" }, \
+ { EACCES, "EACCES" }, \
+ { EEXIST, "EEXIST" }, \
+ { EXDEV, "EXDEV" }, \
+ { ENOTDIR, "ENOTDIR" }, \
+ { EISDIR, "EISDIR" }, \
+ { EFBIG, "EFBIG" }, \
+ { ENOSPC, "ENOSPC" }, \
+ { EROFS, "EROFS" }, \
+ { EMLINK, "EMLINK" }, \
+ { ENAMETOOLONG, "ENAMETOOLONG" }, \
+ { ENOTEMPTY, "ENOTEMPTY" }, \
+ { EDQUOT, "EDQUOT" }, \
+ { ESTALE, "ESTALE" }, \
+ { EBADHANDLE, "EBADHANDLE" }, \
+ { EBADCOOKIE, "EBADCOOKIE" }, \
+ { ENOTSUPP, "ENOTSUPP" }, \
+ { ETOOSMALL, "ETOOSMALL" }, \
+ { EREMOTEIO, "EREMOTEIO" }, \
+ { EBADTYPE, "EBADTYPE" }, \
+ { EAGAIN, "EAGAIN" }, \
+ { ELOOP, "ELOOP" }, \
+ { EOPNOTSUPP, "EOPNOTSUPP" }, \
+ { EDEADLK, "EDEADLK" }, \
+ { ENOMEM, "ENOMEM" }, \
+ { EKEYEXPIRED, "EKEYEXPIRED" }, \
+ { ETIMEDOUT, "ETIMEDOUT" }, \
+ { ERESTARTSYS, "ERESTARTSYS" }, \
+ { ECONNREFUSED, "ECONNREFUSED" }, \
+ { ECONNRESET, "ECONNRESET" }, \
+ { ENETUNREACH, "ENETUNREACH" }, \
+ { EHOSTUNREACH, "EHOSTUNREACH" }, \
+ { EHOSTDOWN, "EHOSTDOWN" }, \
+ { EPIPE, "EPIPE" }, \
+ { EPFNOSUPPORT, "EPFNOSUPPORT" }, \
+ { EPROTONOSUPPORT, "EPROTONOSUPPORT" }, \
+ { NFS4ERR_ACCESS, "ACCESS" }, \
+ { NFS4ERR_ATTRNOTSUPP, "ATTRNOTSUPP" }, \
+ { NFS4ERR_ADMIN_REVOKED, "ADMIN_REVOKED" }, \
+ { NFS4ERR_BACK_CHAN_BUSY, "BACK_CHAN_BUSY" }, \
+ { NFS4ERR_BADCHAR, "BADCHAR" }, \
+ { NFS4ERR_BADHANDLE, "BADHANDLE" }, \
+ { NFS4ERR_BADIOMODE, "BADIOMODE" }, \
+ { NFS4ERR_BADLAYOUT, "BADLAYOUT" }, \
+ { NFS4ERR_BADLABEL, "BADLABEL" }, \
+ { NFS4ERR_BADNAME, "BADNAME" }, \
+ { NFS4ERR_BADOWNER, "BADOWNER" }, \
+ { NFS4ERR_BADSESSION, "BADSESSION" }, \
+ { NFS4ERR_BADSLOT, "BADSLOT" }, \
+ { NFS4ERR_BADTYPE, "BADTYPE" }, \
+ { NFS4ERR_BADXDR, "BADXDR" }, \
+ { NFS4ERR_BAD_COOKIE, "BAD_COOKIE" }, \
+ { NFS4ERR_BAD_HIGH_SLOT, "BAD_HIGH_SLOT" }, \
+ { NFS4ERR_BAD_RANGE, "BAD_RANGE" }, \
+ { NFS4ERR_BAD_SEQID, "BAD_SEQID" }, \
+ { NFS4ERR_BAD_SESSION_DIGEST, "BAD_SESSION_DIGEST" }, \
+ { NFS4ERR_BAD_STATEID, "BAD_STATEID" }, \
+ { NFS4ERR_CB_PATH_DOWN, "CB_PATH_DOWN" }, \
+ { NFS4ERR_CLID_INUSE, "CLID_INUSE" }, \
+ { NFS4ERR_CLIENTID_BUSY, "CLIENTID_BUSY" }, \
+ { NFS4ERR_COMPLETE_ALREADY, "COMPLETE_ALREADY" }, \
+ { NFS4ERR_CONN_NOT_BOUND_TO_SESSION, "CONN_NOT_BOUND_TO_SESSION" }, \
+ { NFS4ERR_DEADLOCK, "DEADLOCK" }, \
+ { NFS4ERR_DEADSESSION, "DEAD_SESSION" }, \
+ { NFS4ERR_DELAY, "DELAY" }, \
+ { NFS4ERR_DELEG_ALREADY_WANTED, "DELEG_ALREADY_WANTED" }, \
+ { NFS4ERR_DELEG_REVOKED, "DELEG_REVOKED" }, \
+ { NFS4ERR_DENIED, "DENIED" }, \
+ { NFS4ERR_DIRDELEG_UNAVAIL, "DIRDELEG_UNAVAIL" }, \
+ { NFS4ERR_DQUOT, "DQUOT" }, \
+ { NFS4ERR_ENCR_ALG_UNSUPP, "ENCR_ALG_UNSUPP" }, \
+ { NFS4ERR_EXIST, "EXIST" }, \
+ { NFS4ERR_EXPIRED, "EXPIRED" }, \
+ { NFS4ERR_FBIG, "FBIG" }, \
+ { NFS4ERR_FHEXPIRED, "FHEXPIRED" }, \
+ { NFS4ERR_FILE_OPEN, "FILE_OPEN" }, \
+ { NFS4ERR_GRACE, "GRACE" }, \
+ { NFS4ERR_HASH_ALG_UNSUPP, "HASH_ALG_UNSUPP" }, \
+ { NFS4ERR_INVAL, "INVAL" }, \
+ { NFS4ERR_IO, "IO" }, \
+ { NFS4ERR_ISDIR, "ISDIR" }, \
+ { NFS4ERR_LAYOUTTRYLATER, "LAYOUTTRYLATER" }, \
+ { NFS4ERR_LAYOUTUNAVAILABLE, "LAYOUTUNAVAILABLE" }, \
+ { NFS4ERR_LEASE_MOVED, "LEASE_MOVED" }, \
+ { NFS4ERR_LOCKED, "LOCKED" }, \
+ { NFS4ERR_LOCKS_HELD, "LOCKS_HELD" }, \
+ { NFS4ERR_LOCK_RANGE, "LOCK_RANGE" }, \
+ { NFS4ERR_MINOR_VERS_MISMATCH, "MINOR_VERS_MISMATCH" }, \
+ { NFS4ERR_MLINK, "MLINK" }, \
+ { NFS4ERR_MOVED, "MOVED" }, \
+ { NFS4ERR_NAMETOOLONG, "NAMETOOLONG" }, \
+ { NFS4ERR_NOENT, "NOENT" }, \
+ { NFS4ERR_NOFILEHANDLE, "NOFILEHANDLE" }, \
+ { NFS4ERR_NOMATCHING_LAYOUT, "NOMATCHING_LAYOUT" }, \
+ { NFS4ERR_NOSPC, "NOSPC" }, \
+ { NFS4ERR_NOTDIR, "NOTDIR" }, \
+ { NFS4ERR_NOTEMPTY, "NOTEMPTY" }, \
+ { NFS4ERR_NOTSUPP, "NOTSUPP" }, \
+ { NFS4ERR_NOT_ONLY_OP, "NOT_ONLY_OP" }, \
+ { NFS4ERR_NOT_SAME, "NOT_SAME" }, \
+ { NFS4ERR_NO_GRACE, "NO_GRACE" }, \
+ { NFS4ERR_NXIO, "NXIO" }, \
+ { NFS4ERR_OLD_STATEID, "OLD_STATEID" }, \
+ { NFS4ERR_OPENMODE, "OPENMODE" }, \
+ { NFS4ERR_OP_ILLEGAL, "OP_ILLEGAL" }, \
+ { NFS4ERR_OP_NOT_IN_SESSION, "OP_NOT_IN_SESSION" }, \
+ { NFS4ERR_PERM, "PERM" }, \
+ { NFS4ERR_PNFS_IO_HOLE, "PNFS_IO_HOLE" }, \
+ { NFS4ERR_PNFS_NO_LAYOUT, "PNFS_NO_LAYOUT" }, \
+ { NFS4ERR_RECALLCONFLICT, "RECALLCONFLICT" }, \
+ { NFS4ERR_RECLAIM_BAD, "RECLAIM_BAD" }, \
+ { NFS4ERR_RECLAIM_CONFLICT, "RECLAIM_CONFLICT" }, \
+ { NFS4ERR_REJECT_DELEG, "REJECT_DELEG" }, \
+ { NFS4ERR_REP_TOO_BIG, "REP_TOO_BIG" }, \
+ { NFS4ERR_REP_TOO_BIG_TO_CACHE, "REP_TOO_BIG_TO_CACHE" }, \
+ { NFS4ERR_REQ_TOO_BIG, "REQ_TOO_BIG" }, \
+ { NFS4ERR_RESOURCE, "RESOURCE" }, \
+ { NFS4ERR_RESTOREFH, "RESTOREFH" }, \
+ { NFS4ERR_RETRY_UNCACHED_REP, "RETRY_UNCACHED_REP" }, \
+ { NFS4ERR_RETURNCONFLICT, "RETURNCONFLICT" }, \
+ { NFS4ERR_ROFS, "ROFS" }, \
+ { NFS4ERR_SAME, "SAME" }, \
+ { NFS4ERR_SHARE_DENIED, "SHARE_DENIED" }, \
+ { NFS4ERR_SEQUENCE_POS, "SEQUENCE_POS" }, \
+ { NFS4ERR_SEQ_FALSE_RETRY, "SEQ_FALSE_RETRY" }, \
+ { NFS4ERR_SEQ_MISORDERED, "SEQ_MISORDERED" }, \
+ { NFS4ERR_SERVERFAULT, "SERVERFAULT" }, \
+ { NFS4ERR_STALE, "STALE" }, \
+ { NFS4ERR_STALE_CLIENTID, "STALE_CLIENTID" }, \
+ { NFS4ERR_STALE_STATEID, "STALE_STATEID" }, \
+ { NFS4ERR_SYMLINK, "SYMLINK" }, \
+ { NFS4ERR_TOOSMALL, "TOOSMALL" }, \
+ { NFS4ERR_TOO_MANY_OPS, "TOO_MANY_OPS" }, \
+ { NFS4ERR_UNKNOWN_LAYOUTTYPE, "UNKNOWN_LAYOUTTYPE" }, \
+ { NFS4ERR_UNSAFE_COMPOUND, "UNSAFE_COMPOUND" }, \
+ { NFS4ERR_WRONGSEC, "WRONGSEC" }, \
+ { NFS4ERR_WRONG_CRED, "WRONG_CRED" }, \
+ { NFS4ERR_WRONG_TYPE, "WRONG_TYPE" }, \
+ { NFS4ERR_XDEV, "XDEV" }, \
+ /* ***** Internal to Linux NFS client ***** */ \
+ { NFS4ERR_RESET_TO_MDS, "RESET_TO_MDS" }, \
+ { NFS4ERR_RESET_TO_PNFS, "RESET_TO_PNFS" })
+
+#define show_nfs4_verifier(x) \
+ __print_hex_str(x, NFS4_VERIFIER_SIZE)
+
+TRACE_DEFINE_ENUM(IOMODE_READ);
+TRACE_DEFINE_ENUM(IOMODE_RW);
+TRACE_DEFINE_ENUM(IOMODE_ANY);
+
+#define show_pnfs_layout_iomode(x) \
+ __print_symbolic(x, \
+ { IOMODE_READ, "READ" }, \
+ { IOMODE_RW, "RW" }, \
+ { IOMODE_ANY, "ANY" })
+
+#define show_nfs4_seq4_status(x) \
+ __print_flags(x, "|", \
+ { SEQ4_STATUS_CB_PATH_DOWN, "CB_PATH_DOWN" }, \
+ { SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING, "CB_GSS_CONTEXTS_EXPIRING" }, \
+ { SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRED, "CB_GSS_CONTEXTS_EXPIRED" }, \
+ { SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED, "EXPIRED_ALL_STATE_REVOKED" }, \
+ { SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED, "EXPIRED_SOME_STATE_REVOKED" }, \
+ { SEQ4_STATUS_ADMIN_STATE_REVOKED, "ADMIN_STATE_REVOKED" }, \
+ { SEQ4_STATUS_RECALLABLE_STATE_REVOKED, "RECALLABLE_STATE_REVOKED" }, \
+ { SEQ4_STATUS_LEASE_MOVED, "LEASE_MOVED" }, \
+ { SEQ4_STATUS_RESTART_RECLAIM_NEEDED, "RESTART_RECLAIM_NEEDED" }, \
+ { SEQ4_STATUS_CB_PATH_DOWN_SESSION, "CB_PATH_DOWN_SESSION" }, \
+ { SEQ4_STATUS_BACKCHANNEL_FAULT, "BACKCHANNEL_FAULT" })
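show_nfs_status() is expanded by __print_symbolic() when trace output is
rendered: a plain value-to-name lookup with a numeric fallback. A minimal
userspace sketch of that decode step, using a trimmed subset of the table
(the NFSERR_* numeric values are assumptions copied from <uapi/linux/nfs.h>,
not part of this patch):

.. code-block:: C

	#include <stdio.h>

	struct sym { unsigned long val; const char *name; };

	static const char *show_status(unsigned long v,
				       const struct sym *tbl, int n)
	{
		static char buf[32];
		int i;

		for (i = 0; i < n; i++)
			if (tbl[i].val == v)
				return tbl[i].name;
		snprintf(buf, sizeof(buf), "0x%lx", v);	/* unknown value */
		return buf;
	}

	int main(void)
	{
		static const struct sym nfs_status[] = {
			{ 0, "OK" }, { 1, "PERM" }, { 2, "NOENT" },
			{ 5, "IO" }, { 70, "STALE" }, { 10008, "JUKEBOX" },
		};

		/* prints: STALE */
		printf("%s\n", show_status(70, nfs_status, 6));
		return 0;
	}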
diff --git a/include/trace/events/pagemap.h b/include/trace/events/pagemap.h
index 1d28431e85bd..171524d3526d 100644
--- a/include/trace/events/pagemap.h
+++ b/include/trace/events/pagemap.h
@@ -16,38 +16,38 @@
#define PAGEMAP_MAPPEDDISK 0x0020u
#define PAGEMAP_BUFFERS 0x0040u
-#define trace_pagemap_flags(page) ( \
- (PageAnon(page) ? PAGEMAP_ANONYMOUS : PAGEMAP_FILE) | \
- (page_mapped(page) ? PAGEMAP_MAPPED : 0) | \
- (PageSwapCache(page) ? PAGEMAP_SWAPCACHE : 0) | \
- (PageSwapBacked(page) ? PAGEMAP_SWAPBACKED : 0) | \
- (PageMappedToDisk(page) ? PAGEMAP_MAPPEDDISK : 0) | \
- (page_has_private(page) ? PAGEMAP_BUFFERS : 0) \
+#define trace_pagemap_flags(folio) ( \
+ (folio_test_anon(folio) ? PAGEMAP_ANONYMOUS : PAGEMAP_FILE) | \
+ (folio_mapped(folio) ? PAGEMAP_MAPPED : 0) | \
+ (folio_test_swapcache(folio) ? PAGEMAP_SWAPCACHE : 0) | \
+ (folio_test_swapbacked(folio) ? PAGEMAP_SWAPBACKED : 0) | \
+ (folio_test_mappedtodisk(folio) ? PAGEMAP_MAPPEDDISK : 0) | \
+ (folio_test_private(folio) ? PAGEMAP_BUFFERS : 0) \
)
TRACE_EVENT(mm_lru_insertion,
- TP_PROTO(struct page *page),
+ TP_PROTO(struct folio *folio),
- TP_ARGS(page),
+ TP_ARGS(folio),
TP_STRUCT__entry(
- __field(struct page *, page )
+ __field(struct folio *, folio )
__field(unsigned long, pfn )
__field(enum lru_list, lru )
__field(unsigned long, flags )
),
TP_fast_assign(
- __entry->page = page;
- __entry->pfn = page_to_pfn(page);
- __entry->lru = page_lru(page);
- __entry->flags = trace_pagemap_flags(page);
+ __entry->folio = folio;
+ __entry->pfn = folio_pfn(folio);
+ __entry->lru = folio_lru_list(folio);
+ __entry->flags = trace_pagemap_flags(folio);
),
/* Flag format is based on page-types.c formatting for pagemap */
- TP_printk("page=%p pfn=0x%lx lru=%d flags=%s%s%s%s%s%s",
- __entry->page,
+ TP_printk("folio=%p pfn=0x%lx lru=%d flags=%s%s%s%s%s%s",
+ __entry->folio,
__entry->pfn,
__entry->lru,
__entry->flags & PAGEMAP_MAPPED ? "M" : " ",
@@ -60,23 +60,21 @@ TRACE_EVENT(mm_lru_insertion,
TRACE_EVENT(mm_lru_activate,
- TP_PROTO(struct page *page),
+ TP_PROTO(struct folio *folio),
- TP_ARGS(page),
+ TP_ARGS(folio),
TP_STRUCT__entry(
- __field(struct page *, page )
+ __field(struct folio *, folio )
__field(unsigned long, pfn )
),
TP_fast_assign(
- __entry->page = page;
- __entry->pfn = page_to_pfn(page);
+ __entry->folio = folio;
+ __entry->pfn = folio_pfn(folio);
),
- /* Flag format is based on page-types.c formatting for pagemap */
- TP_printk("page=%p pfn=0x%lx", __entry->page, __entry->pfn)
-
+ TP_printk("folio=%p pfn=0x%lx", __entry->folio, __entry->pfn)
);
#endif /* _TRACE_PAGEMAP_H */
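The flags word built by trace_pagemap_flags() is rendered in TP_printk() as a
page-types.c style six-character column. A userspace sketch of that rendering;
the two flag values visible in the hunk are copied verbatim, while the other
values and the column order beyond the "M" column are assumptions based on
page-types.c conventions:

.. code-block:: C

	#include <stdio.h>

	#define PAGEMAP_ANONYMOUS	0x0001u
	#define PAGEMAP_FILE		0x0002u
	#define PAGEMAP_MAPPED		0x0004u
	#define PAGEMAP_SWAPCACHE	0x0008u
	#define PAGEMAP_SWAPBACKED	0x0010u
	#define PAGEMAP_MAPPEDDISK	0x0020u
	#define PAGEMAP_BUFFERS		0x0040u

	static void show_flags(unsigned long f)
	{
		printf("flags=%s%s%s%s%s%s\n",
		       f & PAGEMAP_MAPPED     ? "M" : " ",
		       f & PAGEMAP_ANONYMOUS  ? "a" : "f",
		       f & PAGEMAP_SWAPCACHE  ? "s" : " ",
		       f & PAGEMAP_SWAPBACKED ? "b" : " ",
		       f & PAGEMAP_MAPPEDDISK ? "d" : " ",
		       f & PAGEMAP_BUFFERS    ? "B" : " ");
	}

	int main(void)
	{
		/* prints: flags=Ma b   (mapped, anon, swap-backed) */
		show_flags(PAGEMAP_MAPPED | PAGEMAP_ANONYMOUS |
			   PAGEMAP_SWAPBACKED);
		return 0;
	}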
diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h
index b2a2672e6632..3ba63319af3c 100644
--- a/include/trace/events/rpcgss.h
+++ b/include/trace/events/rpcgss.h
@@ -13,6 +13,8 @@
#include <linux/tracepoint.h>
+#include <trace/events/sunrpc_base.h>
+
/**
** GSS-API related trace events
**/
@@ -99,7 +101,7 @@ DECLARE_EVENT_CLASS(rpcgss_gssapi_event,
__entry->maj_stat = maj_stat;
),
- TP_printk("task:%u@%u maj_stat=%s",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " maj_stat=%s",
__entry->task_id, __entry->client_id,
__entry->maj_stat == 0 ?
"GSS_S_COMPLETE" : show_gss_status(__entry->maj_stat))
@@ -332,7 +334,8 @@ TRACE_EVENT(rpcgss_unwrap_failed,
__entry->client_id = task->tk_client->cl_clid;
),
- TP_printk("task:%u@%u", __entry->task_id, __entry->client_id)
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER,
+ __entry->task_id, __entry->client_id)
);
TRACE_EVENT(rpcgss_bad_seqno,
@@ -358,7 +361,8 @@ TRACE_EVENT(rpcgss_bad_seqno,
__entry->received = received;
),
- TP_printk("task:%u@%u expected seqno %u, received seqno %u",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " expected seqno %u, received seqno %u",
__entry->task_id, __entry->client_id,
__entry->expected, __entry->received)
);
@@ -386,7 +390,7 @@ TRACE_EVENT(rpcgss_seqno,
__entry->seqno = rqst->rq_seqno;
),
- TP_printk("task:%u@%u xid=0x%08x seqno=%u",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x seqno=%u",
__entry->task_id, __entry->client_id,
__entry->xid, __entry->seqno)
);
@@ -418,7 +422,8 @@ TRACE_EVENT(rpcgss_need_reencode,
__entry->ret = ret;
),
- TP_printk("task:%u@%u xid=0x%08x rq_seqno=%u seq_xmit=%u reencode %sneeded",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " xid=0x%08x rq_seqno=%u seq_xmit=%u reencode %sneeded",
__entry->task_id, __entry->client_id,
__entry->xid, __entry->seqno, __entry->seq_xmit,
__entry->ret ? "" : "un")
@@ -452,7 +457,8 @@ TRACE_EVENT(rpcgss_update_slack,
__entry->verfsize = auth->au_verfsize;
),
- TP_printk("task:%u@%u xid=0x%08x auth=%p rslack=%u ralign=%u verfsize=%u\n",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " xid=0x%08x auth=%p rslack=%u ralign=%u verfsize=%u\n",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->auth, __entry->rslack, __entry->ralign,
__entry->verfsize)
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index de4195499592..fcd3b3f1020a 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -14,7 +14,9 @@
#include <linux/sunrpc/rpc_rdma_cid.h>
#include <linux/tracepoint.h>
#include <rdma/ib_cm.h>
+
#include <trace/events/rdma.h>
+#include <trace/events/sunrpc_base.h>
/**
** Event classes
@@ -60,6 +62,74 @@ DECLARE_EVENT_CLASS(rpcrdma_completion_class,
), \
TP_ARGS(wc, cid))
+DECLARE_EVENT_CLASS(rpcrdma_send_completion_class,
+ TP_PROTO(
+ const struct ib_wc *wc,
+ const struct rpc_rdma_cid *cid
+ ),
+
+ TP_ARGS(wc, cid),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
+ ),
+
+ TP_printk("cq.id=%u cid=%d",
+ __entry->cq_id, __entry->completion_id
+ )
+);
+
+#define DEFINE_SEND_COMPLETION_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_send_completion_class, name, \
+ TP_PROTO( \
+ const struct ib_wc *wc, \
+ const struct rpc_rdma_cid *cid \
+ ), \
+ TP_ARGS(wc, cid))
+
+DECLARE_EVENT_CLASS(rpcrdma_send_flush_class,
+ TP_PROTO(
+ const struct ib_wc *wc,
+ const struct rpc_rdma_cid *cid
+ ),
+
+ TP_ARGS(wc, cid),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ __field(unsigned long, status)
+ __field(unsigned int, vendor_err)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
+ __entry->status = wc->status;
+ __entry->vendor_err = wc->vendor_err;
+ ),
+
+ TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
+ __entry->cq_id, __entry->completion_id,
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err
+ )
+);
+
+#define DEFINE_SEND_FLUSH_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_send_flush_class, name, \
+ TP_PROTO( \
+ const struct ib_wc *wc, \
+ const struct rpc_rdma_cid *cid \
+ ), \
+ TP_ARGS(wc, cid))
+
DECLARE_EVENT_CLASS(rpcrdma_mr_completion_class,
TP_PROTO(
const struct ib_wc *wc,
@@ -145,6 +215,77 @@ DECLARE_EVENT_CLASS(rpcrdma_receive_completion_class,
), \
TP_ARGS(wc, cid))
+DECLARE_EVENT_CLASS(rpcrdma_receive_success_class,
+ TP_PROTO(
+ const struct ib_wc *wc,
+ const struct rpc_rdma_cid *cid
+ ),
+
+ TP_ARGS(wc, cid),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ __field(u32, received)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
+ __entry->received = wc->byte_len;
+ ),
+
+ TP_printk("cq.id=%u cid=%d received=%u",
+ __entry->cq_id, __entry->completion_id,
+ __entry->received
+ )
+);
+
+#define DEFINE_RECEIVE_SUCCESS_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_receive_success_class, name, \
+ TP_PROTO( \
+ const struct ib_wc *wc, \
+ const struct rpc_rdma_cid *cid \
+ ), \
+ TP_ARGS(wc, cid))
+
+DECLARE_EVENT_CLASS(rpcrdma_receive_flush_class,
+ TP_PROTO(
+ const struct ib_wc *wc,
+ const struct rpc_rdma_cid *cid
+ ),
+
+ TP_ARGS(wc, cid),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ __field(unsigned long, status)
+ __field(unsigned int, vendor_err)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
+ __entry->status = wc->status;
+ __entry->vendor_err = wc->vendor_err;
+ ),
+
+ TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
+ __entry->cq_id, __entry->completion_id,
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err
+ )
+);
+
+#define DEFINE_RECEIVE_FLUSH_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_receive_flush_class, name, \
+ TP_PROTO( \
+ const struct ib_wc *wc, \
+ const struct rpc_rdma_cid *cid \
+ ), \
+ TP_ARGS(wc, cid))
+
DECLARE_EVENT_CLASS(xprtrdma_reply_class,
TP_PROTO(
const struct rpcrdma_rep *rep
@@ -279,7 +420,8 @@ DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
__entry->nsegs = nsegs;
),
- TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " pos=%u %u@0x%016llx:0x%08x (%s)",
__entry->task_id, __entry->client_id,
__entry->pos, __entry->length,
(unsigned long long)__entry->offset, __entry->handle,
@@ -326,7 +468,8 @@ DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
__entry->nsegs = nsegs;
),
- TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " %u@0x%016llx:0x%08x (%s)",
__entry->task_id, __entry->client_id,
__entry->length, (unsigned long long)__entry->offset,
__entry->handle,
@@ -375,10 +518,16 @@ DECLARE_EVENT_CLASS(xprtrdma_mr_class,
TP_fast_assign(
const struct rpcrdma_req *req = mr->mr_req;
- const struct rpc_task *task = req->rl_slot.rq_task;
- __entry->task_id = task->tk_pid;
- __entry->client_id = task->tk_client->cl_clid;
+ if (req) {
+ const struct rpc_task *task = req->rl_slot.rq_task;
+
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ } else {
+ __entry->task_id = 0;
+ __entry->client_id = -1;
+ }
__entry->mr_id = mr->mr_ibmr->res.id;
__entry->nents = mr->mr_nents;
__entry->handle = mr->mr_handle;
@@ -387,7 +536,8 @@ DECLARE_EVENT_CLASS(xprtrdma_mr_class,
__entry->dir = mr->mr_dir;
),
- TP_printk("task:%u@%u mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
__entry->task_id, __entry->client_id,
__entry->mr_id, __entry->nents, __entry->length,
(unsigned long long)__entry->offset, __entry->handle,
@@ -630,15 +780,16 @@ TRACE_EVENT(xprtrdma_nomrs_err,
__assign_str(port, rpcrdma_portstr(r_xprt));
),
- TP_printk("peer=[%s]:%s task:%u@%u",
- __get_str(addr), __get_str(port),
- __entry->task_id, __entry->client_id
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " peer=[%s]:%s",
+ __entry->task_id, __entry->client_id,
+ __get_str(addr), __get_str(port)
)
);
DEFINE_RDCH_EVENT(read);
DEFINE_WRCH_EVENT(write);
DEFINE_WRCH_EVENT(reply);
+DEFINE_WRCH_EVENT(wp);
TRACE_DEFINE_ENUM(rpcrdma_noch);
TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
@@ -693,7 +844,8 @@ TRACE_EVENT(xprtrdma_marshal,
__entry->wtype = wtype;
),
- TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " xid=0x%08x hdr=%u xdr=%u/%u/%u %s/%s",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->hdrlen,
__entry->headlen, __entry->pagelen, __entry->taillen,
@@ -723,7 +875,7 @@ TRACE_EVENT(xprtrdma_marshal_failed,
__entry->ret = ret;
),
- TP_printk("task:%u@%u xid=0x%08x: ret=%d",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x ret=%d",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->ret
)
@@ -750,7 +902,7 @@ TRACE_EVENT(xprtrdma_prepsend_failed,
__entry->ret = ret;
),
- TP_printk("task:%u@%u xid=0x%08x: ret=%d",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x ret=%d",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->ret
)
@@ -785,7 +937,7 @@ TRACE_EVENT(xprtrdma_post_send,
__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
),
- TP_printk("task:%u@%u cq.id=%u cid=%d (%d SGE%s) %s",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " cq.id=%u cid=%d (%d SGE%s) %s",
__entry->task_id, __entry->client_id,
__entry->cq_id, __entry->completion_id,
__entry->num_sge, (__entry->num_sge == 1 ? "" : "s"),
@@ -820,7 +972,7 @@ TRACE_EVENT(xprtrdma_post_send_err,
__entry->rc = rc;
),
- TP_printk("task:%u@%u cq.id=%u rc=%d",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " cq.id=%u rc=%d",
__entry->task_id, __entry->client_id,
__entry->cq_id, __entry->rc
)
@@ -932,7 +1084,7 @@ TRACE_EVENT(xprtrdma_post_linv_err,
__entry->status = status;
),
- TP_printk("task:%u@%u status=%d",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " status=%d",
__entry->task_id, __entry->client_id, __entry->status
)
);
@@ -1120,7 +1272,7 @@ TRACE_EVENT(xprtrdma_reply,
__entry->credits = credits;
),
- TP_printk("task:%u@%u xid=0x%08x credits=%u",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x credits=%u",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->credits
)
@@ -1156,7 +1308,7 @@ TRACE_EVENT(xprtrdma_err_vers,
__entry->max = be32_to_cpup(max);
),
- TP_printk("task:%u@%u xid=0x%08x versions=[%u, %u]",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x versions=[%u, %u]",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->min, __entry->max
)
@@ -1181,7 +1333,7 @@ TRACE_EVENT(xprtrdma_err_chunk,
__entry->xid = be32_to_cpu(rqst->rq_xid);
),
- TP_printk("task:%u@%u xid=0x%08x",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x",
__entry->task_id, __entry->client_id, __entry->xid
)
);
@@ -1207,7 +1359,7 @@ TRACE_EVENT(xprtrdma_err_unrecognized,
__entry->procedure = be32_to_cpup(procedure);
),
- TP_printk("task:%u@%u xid=0x%08x procedure=%u",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x procedure=%u",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->procedure
)
@@ -1239,7 +1391,7 @@ TRACE_EVENT(xprtrdma_fixup,
__entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len;
),
- TP_printk("task:%u@%u fixup=%lu xdr=%zu/%u/%zu",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " fixup=%lu xdr=%zu/%u/%zu",
__entry->task_id, __entry->client_id, __entry->fixup,
__entry->headlen, __entry->pagelen, __entry->taillen
)
@@ -1289,7 +1441,7 @@ TRACE_EVENT(xprtrdma_mrs_zap,
__entry->client_id = task->tk_client->cl_clid;
),
- TP_printk("task:%u@%u",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER,
__entry->task_id, __entry->client_id
)
);
@@ -1868,7 +2020,9 @@ TRACE_EVENT(svcrdma_post_send,
)
);
-DEFINE_COMPLETION_EVENT(svcrdma_wc_send);
+DEFINE_SEND_COMPLETION_EVENT(svcrdma_wc_send);
+DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_flush);
+DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_err);
TRACE_EVENT(svcrdma_post_recv,
TP_PROTO(
@@ -1892,7 +2046,9 @@ TRACE_EVENT(svcrdma_post_recv,
)
);
-DEFINE_RECEIVE_COMPLETION_EVENT(svcrdma_wc_receive);
+DEFINE_RECEIVE_SUCCESS_EVENT(svcrdma_wc_recv);
+DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_flush);
+DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_err);
TRACE_EVENT(svcrdma_rq_post_err,
TP_PROTO(
@@ -1956,8 +2112,42 @@ DEFINE_POST_CHUNK_EVENT(read);
DEFINE_POST_CHUNK_EVENT(write);
DEFINE_POST_CHUNK_EVENT(reply);
-DEFINE_COMPLETION_EVENT(svcrdma_wc_read);
-DEFINE_COMPLETION_EVENT(svcrdma_wc_write);
+TRACE_EVENT(svcrdma_wc_read,
+ TP_PROTO(
+ const struct ib_wc *wc,
+ const struct rpc_rdma_cid *cid,
+ unsigned int totalbytes,
+ const ktime_t posttime
+ ),
+
+ TP_ARGS(wc, cid, totalbytes, posttime),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ __field(s64, read_latency)
+ __field(unsigned int, totalbytes)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
+ __entry->totalbytes = totalbytes;
+ __entry->read_latency = ktime_us_delta(ktime_get(), posttime);
+ ),
+
+ TP_printk("cq.id=%u cid=%d totalbytes=%u latency-us=%lld",
+ __entry->cq_id, __entry->completion_id,
+ __entry->totalbytes, __entry->read_latency
+ )
+);
+
+DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_flush);
+DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_err);
+
+DEFINE_SEND_COMPLETION_EVENT(svcrdma_wc_write);
+DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_flush);
+DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_err);
TRACE_EVENT(svcrdma_qp_error,
TP_PROTO(
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 2d04eb96d418..3a99358c262b 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -14,6 +14,8 @@
#include <linux/net.h>
#include <linux/tracepoint.h>
+#include <trace/events/sunrpc_base.h>
+
TRACE_DEFINE_ENUM(SOCK_STREAM);
TRACE_DEFINE_ENUM(SOCK_DGRAM);
TRACE_DEFINE_ENUM(SOCK_RAW);
@@ -62,6 +64,7 @@ DECLARE_EVENT_CLASS(rpc_xdr_buf_class,
__field(size_t, head_len)
__field(const void *, tail_base)
__field(size_t, tail_len)
+ __field(unsigned int, page_base)
__field(unsigned int, page_len)
__field(unsigned int, msg_len)
),
@@ -74,14 +77,18 @@ DECLARE_EVENT_CLASS(rpc_xdr_buf_class,
__entry->head_len = xdr->head[0].iov_len;
__entry->tail_base = xdr->tail[0].iov_base;
__entry->tail_len = xdr->tail[0].iov_len;
+ __entry->page_base = xdr->page_base;
__entry->page_len = xdr->page_len;
__entry->msg_len = xdr->len;
),
- TP_printk("task:%u@%u head=[%p,%zu] page=%u tail=[%p,%zu] len=%u",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " head=[%p,%zu] page=%u(%u) tail=[%p,%zu] len=%u",
__entry->task_id, __entry->client_id,
- __entry->head_base, __entry->head_len, __entry->page_len,
- __entry->tail_base, __entry->tail_len, __entry->msg_len
+ __entry->head_base, __entry->head_len,
+ __entry->page_len, __entry->page_base,
+ __entry->tail_base, __entry->tail_len,
+ __entry->msg_len
)
);
@@ -114,7 +121,7 @@ DECLARE_EVENT_CLASS(rpc_clnt_class,
__entry->client_id = clnt->cl_clid;
),
- TP_printk("clid=%u", __entry->client_id)
+ TP_printk("client=" SUNRPC_TRACE_CLID_SPECIFIER, __entry->client_id)
);
#define DEFINE_RPC_CLNT_EVENT(name) \
@@ -158,7 +165,8 @@ TRACE_EVENT(rpc_clnt_new,
__assign_str(server, server);
),
- TP_printk("client=%u peer=[%s]:%s program=%s server=%s",
+ TP_printk("client=" SUNRPC_TRACE_CLID_SPECIFIER
+ " peer=[%s]:%s program=%s server=%s",
__entry->client_id, __get_str(addr), __get_str(port),
__get_str(program), __get_str(server))
);
@@ -206,7 +214,8 @@ TRACE_EVENT(rpc_clnt_clone_err,
__entry->error = error;
),
- TP_printk("client=%u error=%d", __entry->client_id, __entry->error)
+ TP_printk("client=" SUNRPC_TRACE_CLID_SPECIFIER " error=%d",
+ __entry->client_id, __entry->error)
);
@@ -248,7 +257,7 @@ DECLARE_EVENT_CLASS(rpc_task_status,
__entry->status = task->tk_status;
),
- TP_printk("task:%u@%u status=%d",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " status=%d",
__entry->task_id, __entry->client_id,
__entry->status)
);
@@ -288,7 +297,7 @@ TRACE_EVENT(rpc_request,
__assign_str(procname, rpc_proc_name(task));
),
- TP_printk("task:%u@%u %sv%d %s (%ssync)",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " %sv%d %s (%ssync)",
__entry->task_id, __entry->client_id,
__get_str(progname), __entry->version,
__get_str(procname), __entry->async ? "a": ""
@@ -348,7 +357,8 @@ DECLARE_EVENT_CLASS(rpc_task_running,
__entry->flags = task->tk_flags;
),
- TP_printk("task:%u@%d flags=%s runstate=%s status=%d action=%ps",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " flags=%s runstate=%s status=%d action=%ps",
__entry->task_id, __entry->client_id,
rpc_show_task_flags(__entry->flags),
rpc_show_runstate(__entry->runstate),
@@ -372,6 +382,7 @@ DEFINE_RPC_RUNNING_EVENT(complete);
DEFINE_RPC_RUNNING_EVENT(timeout);
DEFINE_RPC_RUNNING_EVENT(signalled);
DEFINE_RPC_RUNNING_EVENT(end);
+DEFINE_RPC_RUNNING_EVENT(call_done);
DECLARE_EVENT_CLASS(rpc_task_queued,
@@ -400,7 +411,8 @@ DECLARE_EVENT_CLASS(rpc_task_queued,
__assign_str(q_name, rpc_qname(q));
),
- TP_printk("task:%u@%d flags=%s runstate=%s status=%d timeout=%lu queue=%s",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " flags=%s runstate=%s status=%d timeout=%lu queue=%s",
__entry->task_id, __entry->client_id,
rpc_show_task_flags(__entry->flags),
rpc_show_runstate(__entry->runstate),
@@ -436,7 +448,7 @@ DECLARE_EVENT_CLASS(rpc_failure,
__entry->client_id = task->tk_client->cl_clid;
),
- TP_printk("task:%u@%u",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER,
__entry->task_id, __entry->client_id)
);
@@ -478,7 +490,8 @@ DECLARE_EVENT_CLASS(rpc_reply_event,
__assign_str(servername, task->tk_xprt->servername);
),
- TP_printk("task:%u@%d server=%s xid=0x%08x %sv%d %s",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " server=%s xid=0x%08x %sv%d %s",
__entry->task_id, __entry->client_id, __get_str(servername),
__entry->xid, __get_str(progname), __entry->version,
__get_str(procname))
@@ -538,7 +551,8 @@ TRACE_EVENT(rpc_buf_alloc,
__entry->status = status;
),
- TP_printk("task:%u@%u callsize=%zu recvsize=%zu status=%d",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " callsize=%zu recvsize=%zu status=%d",
__entry->task_id, __entry->client_id,
__entry->callsize, __entry->recvsize, __entry->status
)
@@ -567,7 +581,8 @@ TRACE_EVENT(rpc_call_rpcerror,
__entry->rpc_status = rpc_status;
),
- TP_printk("task:%u@%u tk_status=%d rpc_status=%d",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " tk_status=%d rpc_status=%d",
__entry->task_id, __entry->client_id,
__entry->tk_status, __entry->rpc_status)
);
@@ -607,7 +622,8 @@ TRACE_EVENT(rpc_stats_latency,
__entry->execute = ktime_to_us(execute);
),
- TP_printk("task:%u@%d xid=0x%08x %sv%d %s backlog=%lu rtt=%lu execute=%lu",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " xid=0x%08x %sv%d %s backlog=%lu rtt=%lu execute=%lu",
__entry->task_id, __entry->client_id, __entry->xid,
__get_str(progname), __entry->version, __get_str(procname),
__entry->backlog, __entry->rtt, __entry->execute)
@@ -651,8 +667,8 @@ TRACE_EVENT(rpc_xdr_overflow,
__entry->version = task->tk_client->cl_vers;
__assign_str(procedure, task->tk_msg.rpc_proc->p_name);
} else {
- __entry->task_id = 0;
- __entry->client_id = 0;
+ __entry->task_id = -1;
+ __entry->client_id = -1;
__assign_str(progname, "unknown");
__entry->version = 0;
__assign_str(procedure, "unknown");
@@ -668,8 +684,8 @@ TRACE_EVENT(rpc_xdr_overflow,
__entry->len = xdr->buf->len;
),
- TP_printk(
- "task:%u@%u %sv%d %s requested=%zu p=%p end=%p xdr=[%p,%zu]/%u/[%p,%zu]/%u\n",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " %sv%d %s requested=%zu p=%p end=%p xdr=[%p,%zu]/%u/[%p,%zu]/%u\n",
__entry->task_id, __entry->client_id,
__get_str(progname), __entry->version, __get_str(procedure),
__entry->requested, __entry->p, __entry->end,
@@ -727,8 +743,8 @@ TRACE_EVENT(rpc_xdr_alignment,
__entry->len = xdr->buf->len;
),
- TP_printk(
- "task:%u@%u %sv%d %s offset=%zu copied=%u xdr=[%p,%zu]/%u/[%p,%zu]/%u\n",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " %sv%d %s offset=%zu copied=%u xdr=[%p,%zu]/%u/[%p,%zu]/%u\n",
__entry->task_id, __entry->client_id,
__get_str(progname), __entry->version, __get_str(procedure),
__entry->offset, __entry->copied,
@@ -917,7 +933,8 @@ TRACE_EVENT(rpc_socket_nospace,
__entry->remaining = rqst->rq_slen - transport->xmit.offset;
),
- TP_printk("task:%u@%u total=%u remaining=%u",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " total=%u remaining=%u",
__entry->task_id, __entry->client_id,
__entry->total, __entry->remaining
)
@@ -925,18 +942,18 @@ TRACE_EVENT(rpc_socket_nospace,
#define rpc_show_xprt_state(x) \
__print_flags(x, "|", \
- { (1UL << XPRT_LOCKED), "LOCKED"}, \
- { (1UL << XPRT_CONNECTED), "CONNECTED"}, \
- { (1UL << XPRT_CONNECTING), "CONNECTING"}, \
- { (1UL << XPRT_CLOSE_WAIT), "CLOSE_WAIT"}, \
- { (1UL << XPRT_BOUND), "BOUND"}, \
- { (1UL << XPRT_BINDING), "BINDING"}, \
- { (1UL << XPRT_CLOSING), "CLOSING"}, \
- { (1UL << XPRT_OFFLINE), "OFFLINE"}, \
- { (1UL << XPRT_REMOVE), "REMOVE"}, \
- { (1UL << XPRT_CONGESTED), "CONGESTED"}, \
- { (1UL << XPRT_CWND_WAIT), "CWND_WAIT"}, \
- { (1UL << XPRT_WRITE_SPACE), "WRITE_SPACE"})
+ { BIT(XPRT_LOCKED), "LOCKED" }, \
+ { BIT(XPRT_CONNECTED), "CONNECTED" }, \
+ { BIT(XPRT_CONNECTING), "CONNECTING" }, \
+ { BIT(XPRT_CLOSE_WAIT), "CLOSE_WAIT" }, \
+ { BIT(XPRT_BOUND), "BOUND" }, \
+ { BIT(XPRT_BINDING), "BINDING" }, \
+ { BIT(XPRT_CLOSING), "CLOSING" }, \
+ { BIT(XPRT_OFFLINE), "OFFLINE" }, \
+ { BIT(XPRT_REMOVE), "REMOVE" }, \
+ { BIT(XPRT_CONGESTED), "CONGESTED" }, \
+ { BIT(XPRT_CWND_WAIT), "CWND_WAIT" }, \
+ { BIT(XPRT_WRITE_SPACE), "WRITE_SPACE" })
DECLARE_EVENT_CLASS(rpc_xprt_lifetime_class,
TP_PROTO(
@@ -1042,8 +1059,8 @@ TRACE_EVENT(xprt_transmit,
__entry->status = status;
),
- TP_printk(
- "task:%u@%u xid=0x%08x seqno=%u status=%d",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " xid=0x%08x seqno=%u status=%d",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->seqno, __entry->status)
);
@@ -1082,8 +1099,8 @@ TRACE_EVENT(xprt_retransmit,
__assign_str(procname, rpc_proc_name(task));
),
- TP_printk(
- "task:%u@%u xid=0x%08x %sv%d %s ntrans=%d timeout=%lu",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " xid=0x%08x %sv%d %s ntrans=%d timeout=%lu",
__entry->task_id, __entry->client_id, __entry->xid,
__get_str(progname), __entry->version, __get_str(procname),
__entry->ntrans, __entry->timeout
@@ -1137,7 +1154,8 @@ DECLARE_EVENT_CLASS(xprt_writelock_event,
xprt->snd_task->tk_pid : -1;
),
- TP_printk("task:%u@%u snd_task:%u",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " snd_task:" SUNRPC_TRACE_PID_SPECIFIER,
__entry->task_id, __entry->client_id,
__entry->snd_task_id)
);
@@ -1185,7 +1203,9 @@ DECLARE_EVENT_CLASS(xprt_cong_event,
__entry->wait = test_bit(XPRT_CWND_WAIT, &xprt->state);
),
- TP_printk("task:%u@%u snd_task:%u cong=%lu cwnd=%lu%s",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " snd_task:" SUNRPC_TRACE_PID_SPECIFIER
+ " cong=%lu cwnd=%lu%s",
__entry->task_id, __entry->client_id,
__entry->snd_task_id, __entry->cong, __entry->cwnd,
__entry->wait ? " (wait)" : "")
@@ -1223,7 +1243,7 @@ TRACE_EVENT(xprt_reserve,
__entry->xid = be32_to_cpu(rqst->rq_xid);
),
- TP_printk("task:%u@%u xid=0x%08x",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x",
__entry->task_id, __entry->client_id, __entry->xid
)
);
@@ -1312,7 +1332,8 @@ TRACE_EVENT(rpcb_getport,
__assign_str(servername, task->tk_xprt->servername);
),
- TP_printk("task:%u@%u server=%s program=%u version=%u protocol=%d bind_version=%u",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " server=%s program=%u version=%u protocol=%d bind_version=%u",
__entry->task_id, __entry->client_id, __get_str(servername),
__entry->program, __entry->version, __entry->protocol,
__entry->bind_version
@@ -1342,7 +1363,7 @@ TRACE_EVENT(rpcb_setport,
__entry->port = port;
),
- TP_printk("task:%u@%u status=%d port=%u",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " status=%d port=%u",
__entry->task_id, __entry->client_id,
__entry->status, __entry->port
)
@@ -1496,6 +1517,7 @@ DECLARE_EVENT_CLASS(svc_xdr_buf_class,
__field(size_t, head_len)
__field(const void *, tail_base)
__field(size_t, tail_len)
+ __field(unsigned int, page_base)
__field(unsigned int, page_len)
__field(unsigned int, msg_len)
),
@@ -1506,14 +1528,17 @@ DECLARE_EVENT_CLASS(svc_xdr_buf_class,
__entry->head_len = xdr->head[0].iov_len;
__entry->tail_base = xdr->tail[0].iov_base;
__entry->tail_len = xdr->tail[0].iov_len;
+ __entry->page_base = xdr->page_base;
__entry->page_len = xdr->page_len;
__entry->msg_len = xdr->len;
),
- TP_printk("xid=0x%08x head=[%p,%zu] page=%u tail=[%p,%zu] len=%u",
+ TP_printk("xid=0x%08x head=[%p,%zu] page=%u(%u) tail=[%p,%zu] len=%u",
__entry->xid,
- __entry->head_base, __entry->head_len, __entry->page_len,
- __entry->tail_base, __entry->tail_len, __entry->msg_len
+ __entry->head_base, __entry->head_len,
+ __entry->page_len, __entry->page_base,
+ __entry->tail_base, __entry->tail_len,
+ __entry->msg_len
)
);
@@ -1859,6 +1884,24 @@ TRACE_EVENT(svc_wake_up,
TP_printk("pid=%d", __entry->pid)
);
+TRACE_EVENT(svc_alloc_arg_err,
+ TP_PROTO(
+ unsigned int pages
+ ),
+
+ TP_ARGS(pages),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, pages)
+ ),
+
+ TP_fast_assign(
+ __entry->pages = pages;
+ ),
+
+ TP_printk("pages=%u", __entry->pages)
+);
+
TRACE_EVENT(svc_handle_xprt,
TP_PROTO(struct svc_xprt *xprt, int len),
diff --git a/include/trace/events/sunrpc_base.h b/include/trace/events/sunrpc_base.h
new file mode 100644
index 000000000000..588557d07ea8
--- /dev/null
+++ b/include/trace/events/sunrpc_base.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 Oracle and/or its affiliates.
+ *
+ * Common types and format specifiers for sunrpc.
+ */
+
+#if !defined(_TRACE_SUNRPC_BASE_H)
+#define _TRACE_SUNRPC_BASE_H
+
+#include <linux/tracepoint.h>
+
+#define SUNRPC_TRACE_PID_SPECIFIER "%08x"
+#define SUNRPC_TRACE_CLID_SPECIFIER "%08x"
+#define SUNRPC_TRACE_TASK_SPECIFIER \
+ "task:" SUNRPC_TRACE_PID_SPECIFIER "@" SUNRPC_TRACE_CLID_SPECIFIER
+
+#endif /* _TRACE_SUNRPC_BASE_H */
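The specifiers above lean on C's adjacent string-literal concatenation, so the
TP_printk() format strings that use them are assembled at compile time with no
runtime cost. A standalone sketch (printf stands in for TP_printk):

.. code-block:: C

	#include <stdio.h>

	#define SUNRPC_TRACE_PID_SPECIFIER  "%08x"
	#define SUNRPC_TRACE_CLID_SPECIFIER "%08x"
	#define SUNRPC_TRACE_TASK_SPECIFIER \
		"task:" SUNRPC_TRACE_PID_SPECIFIER "@" SUNRPC_TRACE_CLID_SPECIFIER

	int main(void)
	{
		unsigned int task_id = 42, client_id = 7;

		/* prints: task:0000002a@00000007 status=-5 */
		printf(SUNRPC_TRACE_TASK_SPECIFIER " status=%d\n",
		       task_id, client_id, -5);
		return 0;
	}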
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index 88faf2400ec2..f25a6149d3ba 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -27,6 +27,18 @@
{RECLAIM_WB_ASYNC, "RECLAIM_WB_ASYNC"} \
) : "RECLAIM_WB_NONE"
+#define _VMSCAN_THROTTLE_WRITEBACK (1 << VMSCAN_THROTTLE_WRITEBACK)
+#define _VMSCAN_THROTTLE_ISOLATED (1 << VMSCAN_THROTTLE_ISOLATED)
+#define _VMSCAN_THROTTLE_NOPROGRESS (1 << VMSCAN_THROTTLE_NOPROGRESS)
+
+#define show_throttle_flags(flags) \
+ (flags) ? __print_flags(flags, "|", \
+ {_VMSCAN_THROTTLE_WRITEBACK, "VMSCAN_THROTTLE_WRITEBACK"}, \
+ {_VMSCAN_THROTTLE_ISOLATED, "VMSCAN_THROTTLE_ISOLATED"}, \
+ {_VMSCAN_THROTTLE_NOPROGRESS, "VMSCAN_THROTTLE_NOPROGRESS"} \
+ ) : "VMSCAN_THROTTLE_NONE"
+
#define trace_reclaim_flags(file) ( \
(file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
(RECLAIM_WB_ASYNC) \
@@ -454,6 +466,32 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_node_reclaim_end,
TP_ARGS(nr_reclaimed)
);
+TRACE_EVENT(mm_vmscan_throttled,
+
+ TP_PROTO(int nid, int usec_timeout, int usec_delayed, int reason),
+
+ TP_ARGS(nid, usec_timeout, usec_delayed, reason),
+
+ TP_STRUCT__entry(
+ __field(int, nid)
+ __field(int, usec_timeout)
+ __field(int, usec_delayed)
+ __field(int, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->nid = nid;
+ __entry->usec_timeout = usec_timeout;
+ __entry->usec_delayed = usec_delayed;
+ __entry->reason = 1U << reason;
+ ),
+
+ TP_printk("nid=%d usec_timeout=%d usec_delayed=%d reason=%s",
+ __entry->nid,
+ __entry->usec_timeout,
+ __entry->usec_delayed,
+ show_throttle_flags(__entry->reason))
+);
#endif /* _TRACE_VMSCAN_H */
/* This part must be outside protection */
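Note how mm_vmscan_throttled stores the reason: TP_fast_assign() records
1U << reason so that show_throttle_flags(), a __print_flags() wrapper, can
render it symbolically. A userspace sketch of that decode (the enum values
are assumptions mirroring the _VMSCAN_THROTTLE_* order above; the real helper
joins multiple set bits with "|", which a single-reason event never needs):

.. code-block:: C

	#include <stdio.h>

	enum vmscan_throttle_state {	/* assumed order, see above */
		VMSCAN_THROTTLE_WRITEBACK,
		VMSCAN_THROTTLE_ISOLATED,
		VMSCAN_THROTTLE_NOPROGRESS,
	};

	static const char *show_throttle_flags(unsigned int flags)
	{
		if (flags & (1U << VMSCAN_THROTTLE_WRITEBACK))
			return "VMSCAN_THROTTLE_WRITEBACK";
		if (flags & (1U << VMSCAN_THROTTLE_ISOLATED))
			return "VMSCAN_THROTTLE_ISOLATED";
		if (flags & (1U << VMSCAN_THROTTLE_NOPROGRESS))
			return "VMSCAN_THROTTLE_NOPROGRESS";
		return "VMSCAN_THROTTLE_NONE";
	}

	int main(void)
	{
		int reason = VMSCAN_THROTTLE_ISOLATED;

		/* prints: reason=VMSCAN_THROTTLE_ISOLATED */
		printf("reason=%s\n", show_throttle_flags(1U << reason));
		return 0;
	}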
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 840d1ba84cf5..a345b1e12daf 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -52,11 +52,11 @@ WB_WORK_REASON
struct wb_writeback_work;
-DECLARE_EVENT_CLASS(writeback_page_template,
+DECLARE_EVENT_CLASS(writeback_folio_template,
- TP_PROTO(struct page *page, struct address_space *mapping),
+ TP_PROTO(struct folio *folio, struct address_space *mapping),
- TP_ARGS(page, mapping),
+ TP_ARGS(folio, mapping),
TP_STRUCT__entry (
__array(char, name, 32)
@@ -69,7 +69,7 @@ DECLARE_EVENT_CLASS(writeback_page_template,
bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
NULL), 32);
__entry->ino = mapping ? mapping->host->i_ino : 0;
- __entry->index = page->index;
+ __entry->index = folio->index;
),
TP_printk("bdi %s: ino=%lu index=%lu",
@@ -79,18 +79,18 @@ DECLARE_EVENT_CLASS(writeback_page_template,
)
);
-DEFINE_EVENT(writeback_page_template, writeback_dirty_page,
+DEFINE_EVENT(writeback_folio_template, writeback_dirty_folio,
- TP_PROTO(struct page *page, struct address_space *mapping),
+ TP_PROTO(struct folio *folio, struct address_space *mapping),
- TP_ARGS(page, mapping)
+ TP_ARGS(folio, mapping)
);
-DEFINE_EVENT(writeback_page_template, wait_on_page_writeback,
+DEFINE_EVENT(writeback_folio_template, folio_wait_writeback,
- TP_PROTO(struct page *page, struct address_space *mapping),
+ TP_PROTO(struct folio *folio, struct address_space *mapping),
- TP_ARGS(page, mapping)
+ TP_ARGS(folio, mapping)
);
DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
@@ -236,9 +236,9 @@ TRACE_EVENT(inode_switch_wbs,
TRACE_EVENT(track_foreign_dirty,
- TP_PROTO(struct page *page, struct bdi_writeback *wb),
+ TP_PROTO(struct folio *folio, struct bdi_writeback *wb),
- TP_ARGS(page, wb),
+ TP_ARGS(folio, wb),
TP_STRUCT__entry(
__array(char, name, 32)
@@ -250,7 +250,7 @@ TRACE_EVENT(track_foreign_dirty,
),
TP_fast_assign(
- struct address_space *mapping = page_mapping(page);
+ struct address_space *mapping = folio_mapping(folio);
struct inode *inode = mapping ? mapping->host : NULL;
strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
@@ -258,7 +258,7 @@ TRACE_EVENT(track_foreign_dirty,
__entry->ino = inode ? inode->i_ino : 0;
__entry->memcg_id = wb->memcg_css->id;
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
- __entry->page_cgroup_ino = cgroup_ino(page_memcg(page)->css.cgroup);
+ __entry->page_cgroup_ino = cgroup_ino(folio_memcg(folio)->css.cgroup);
),
TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%lu page_cgroup_ino=%lu",
@@ -763,13 +763,6 @@ DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,
TP_ARGS(usec_timeout, usec_delayed)
);
-DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,
-
- TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
-
- TP_ARGS(usec_timeout, usec_delayed)
-);
-
DECLARE_EVENT_CLASS(writeback_single_inode_template,
TP_PROTO(struct inode *inode,
diff --git a/include/uapi/asm-generic/fcntl.h b/include/uapi/asm-generic/fcntl.h
index 9dc0bf0c5a6e..ecd0f5bdfc1d 100644
--- a/include/uapi/asm-generic/fcntl.h
+++ b/include/uapi/asm-generic/fcntl.h
@@ -181,6 +181,10 @@ struct f_owner_ex {
blocking */
#define LOCK_UN 8 /* remove lock */
+/*
+ * LOCK_MAND support has been removed from the kernel. We leave the symbols
+ * here to not break legacy builds, but these should not be used in new code.
+ */
#define LOCK_MAND 32 /* This is a mandatory flock ... */
#define LOCK_READ 64 /* which allows concurrent read operations */
#define LOCK_WRITE 128 /* which allows concurrent write operations */
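With LOCK_MAND gone from the kernel, flock() callers should stick to the
advisory LOCK_SH/LOCK_EX/LOCK_UN operations; current kernels fail LOCK_MAND
requests. A minimal sketch of the supported usage (the lock-file path is
arbitrary):

.. code-block:: C

	#include <err.h>
	#include <fcntl.h>
	#include <sys/file.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/tmp/demo.lock", O_CREAT | O_RDWR, 0600);

		if (fd < 0)
			err(1, "open");
		if (flock(fd, LOCK_EX) < 0)	/* advisory exclusive lock */
			err(1, "flock");
		/* ... critical section ... */
		flock(fd, LOCK_UN);
		close(fd);
		return 0;
	}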
diff --git a/include/uapi/asm-generic/signal-defs.h b/include/uapi/asm-generic/signal-defs.h
index fe929e7b77ca..7572f2f46ee8 100644
--- a/include/uapi/asm-generic/signal-defs.h
+++ b/include/uapi/asm-generic/signal-defs.h
@@ -45,6 +45,7 @@
#define SA_UNSUPPORTED 0x00000400
#define SA_EXPOSE_TAGBITS 0x00000800
/* 0x00010000 used on mips */
+/* 0x00800000 used for internal SA_IMMUTABLE */
/* 0x01000000 used on x86 */
/* 0x02000000 used on x86 */
/*
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index 1f0a2b4864e4..c77a1313b3b0 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -126,6 +126,8 @@
#define SO_BUF_LOCK 72
+#define SO_RESERVE_MEM 73
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
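SO_RESERVE_MEM takes an int byte count and asks the kernel to keep that much
forward-allocated memory attached to the socket, trading memory for fewer
accounting operations on hot paths. The precise semantics and prerequisites
are defined by the commit introducing the option, so treat this as a
hypothetical probe sketch only:

.. code-block:: C

	#include <stdio.h>
	#include <sys/socket.h>

	#ifndef SO_RESERVE_MEM
	#define SO_RESERVE_MEM 73	/* matches the hunk above */
	#endif

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_STREAM, 0);
		int bytes = 1 << 20;	/* request 1 MiB reserved */

		if (fd < 0)
			return 1;
		if (setsockopt(fd, SOL_SOCKET, SO_RESERVE_MEM,
			       &bytes, sizeof(bytes)) < 0)
			perror("SO_RESERVE_MEM (old kernel or not permitted?)");
		return 0;
	}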
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 1c5fb86d455a..4557a8b6086f 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -880,8 +880,11 @@ __SYSCALL(__NR_memfd_secret, sys_memfd_secret)
#define __NR_process_mrelease 448
__SYSCALL(__NR_process_mrelease, sys_process_mrelease)
+#define __NR_futex_waitv 449
+__SYSCALL(__NR_futex_waitv, sys_futex_waitv)
+
#undef __NR_syscalls
-#define __NR_syscalls 449
+#define __NR_syscalls 450
/*
* 32 bit systems traditionally used different
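futex_waitv() waits on multiple futexes at once. Until libc grows a wrapper,
it can be invoked through syscall(2); the structure layout and FUTEX_32 flag
below are copied from <linux/futex.h> and restated here as assumptions so the
sketch is self-contained:

.. code-block:: C

	#include <stdint.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	#ifndef __NR_futex_waitv
	#define __NR_futex_waitv 449
	#endif

	struct futex_waitv {
		uint64_t val;		/* expected value at uaddr */
		uint64_t uaddr;		/* futex word address */
		uint32_t flags;		/* e.g. FUTEX_32 */
		uint32_t __reserved;	/* must be zero */
	};
	#define FUTEX_32 2

	int main(void)
	{
		uint32_t word = 1;
		struct futex_waitv wv = {
			.val = 0,	/* expect 0, but word is 1 */
			.uaddr = (uintptr_t)&word,
			.flags = FUTEX_32,
		};
		/* Mismatch, so this returns -1 with errno EAGAIN
		 * instead of sleeping. */
		long r = syscall(__NR_futex_waitv, &wv, 1, 0, NULL, 0);

		printf("futex_waitv: %ld\n", r);
		return 0;
	}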
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 0cbd1540aeac..26e45fc5eb1a 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -786,13 +786,6 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_VRAM_LOST_COUNTER 0x1F
/* query ras mask of enabled features*/
#define AMDGPU_INFO_RAS_ENABLED_FEATURES 0x20
-/* query video encode/decode caps */
-#define AMDGPU_INFO_VIDEO_CAPS 0x21
- /* Subquery id: Decode */
- #define AMDGPU_INFO_VIDEO_CAPS_DECODE 0
- /* Subquery id: Encode */
- #define AMDGPU_INFO_VIDEO_CAPS_ENCODE 1
-
/* RAS MASK: UMC (VRAM) */
#define AMDGPU_INFO_RAS_ENABLED_UMC (1 << 0)
/* RAS MASK: SDMA */
@@ -821,6 +814,12 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_RAS_ENABLED_MP1 (1 << 12)
/* RAS MASK: FUSE */
#define AMDGPU_INFO_RAS_ENABLED_FUSE (1 << 13)
+/* query video encode/decode caps */
+#define AMDGPU_INFO_VIDEO_CAPS 0x21
+ /* Subquery id: Decode */
+ #define AMDGPU_INFO_VIDEO_CAPS_DECODE 0
+ /* Subquery id: Encode */
+ #define AMDGPU_INFO_VIDEO_CAPS_ENCODE 1
#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 9f4bb4a6f358..7f652c96845b 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -104,6 +104,12 @@ extern "C" {
/* 8 bpp Red */
#define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
+/* 10 bpp Red */
+#define DRM_FORMAT_R10 fourcc_code('R', '1', '0', ' ') /* [15:0] x:R 6:10 little endian */
+
+/* 12 bpp Red */
+#define DRM_FORMAT_R12 fourcc_code('R', '1', '2', ' ') /* [15:0] x:R 4:12 little endian */
+
/* 16 bpp Red */
#define DRM_FORMAT_R16 fourcc_code('R', '1', '6', ' ') /* [15:0] R little endian */
@@ -373,6 +379,12 @@ extern "C" {
#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
+#define fourcc_mod_get_vendor(modifier) \
+ (((modifier) >> 56) & 0xff)
+
+#define fourcc_mod_is_vendor(modifier, vendor) \
+ (fourcc_mod_get_vendor(modifier) == DRM_FORMAT_MOD_VENDOR_## vendor)
+
#define fourcc_mod_code(vendor, val) \
((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | ((val) & 0x00ffffffffffffffULL))
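The two new helpers make modifier-vendor checks self-documenting. A standalone
demo with DRM_FORMAT_MOD_VENDOR_AMD (0x02) restated so it compiles on its own:

.. code-block:: C

	#include <stdio.h>

	typedef unsigned long long __u64;

	#define DRM_FORMAT_MOD_VENDOR_AMD 0x02

	#define fourcc_mod_code(vendor, val) \
		((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | \
		 ((val) & 0x00ffffffffffffffULL))

	#define fourcc_mod_get_vendor(modifier) \
		(((modifier) >> 56) & 0xff)

	#define fourcc_mod_is_vendor(modifier, vendor) \
		(fourcc_mod_get_vendor(modifier) == DRM_FORMAT_MOD_VENDOR_## vendor)

	int main(void)
	{
		__u64 mod = fourcc_mod_code(AMD, 0x1234);

		/* prints: vendor=0x02 is_amd=1 */
		printf("vendor=0x%02llx is_amd=%d\n",
		       fourcc_mod_get_vendor(mod),
		       fourcc_mod_is_vendor(mod, AMD));
		return 0;
	}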
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 90c55383f1ee..e1e351682872 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -1110,6 +1110,10 @@ struct drm_mode_destroy_blob {
* struct drm_mode_create_lease - Create lease
*
* Lease mode resources, creating another drm_master.
+ *
+ * The @object_ids array must reference at least one CRTC, one connector and
+ * one plane if &DRM_CLIENT_CAP_UNIVERSAL_PLANES is enabled. Alternatively,
+ * the lease can be completely empty.
*/
struct drm_mode_create_lease {
/** @object_ids: Pointer to array of object ids (__u32) */
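A hedged sketch of a lease request that satisfies the documented minimum (one
CRTC, one connector, one plane). The object IDs are placeholders that a real
client would discover via the mode-getresources ioctls, and drm_fd is assumed
to be an open DRM master file descriptor:

.. code-block:: C

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <drm/drm.h>	/* struct drm_mode_create_lease */

	int create_lease(int drm_fd, uint32_t crtc_id, uint32_t conn_id,
			 uint32_t plane_id)
	{
		uint32_t objects[3] = { crtc_id, conn_id, plane_id };
		struct drm_mode_create_lease lease = {
			.object_ids   = (uintptr_t)objects,
			.object_count = 3,
			.flags        = O_CLOEXEC,
		};

		if (ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_LEASE, &lease) < 0) {
			perror("DRM_IOCTL_MODE_CREATE_LEASE");
			return -1;
		}
		printf("lessee_id=%u lease_fd=%u\n",
		       lease.lessee_id, lease.fd);
		return (int)lease.fd;
	}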
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index bde5860b3686..914ebd9290e5 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -1522,6 +1522,12 @@ struct drm_i915_gem_caching {
#define I915_TILING_NONE 0
#define I915_TILING_X 1
#define I915_TILING_Y 2
+/*
+ * Do not add new tiling types here. The I915_TILING_* values are for
+ * de-tiling fence registers that no longer exist on modern platforms. Although
+ * the hardware may support new types of tiling in general (e.g., Tile4), we
+ * do not need to add them to the uapi that is specific to now-defunct ioctls.
+ */
#define I915_TILING_LAST I915_TILING_Y
#define I915_BIT_6_SWIZZLE_NONE 0
@@ -1824,6 +1830,7 @@ struct drm_i915_gem_context_param {
* Extensions:
* i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
* i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
+ * i915_context_engines_parallel_submit (I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT)
*/
#define I915_CONTEXT_PARAM_ENGINES 0xa
@@ -1846,6 +1853,55 @@ struct drm_i915_gem_context_param {
* attempted to use it, never re-use this context param number.
*/
#define I915_CONTEXT_PARAM_RINGSIZE 0xc
+
+/*
+ * I915_CONTEXT_PARAM_PROTECTED_CONTENT:
+ *
+ * Mark that the context makes use of protected content, which will result
+ * in the context being invalidated when the protected content session is.
+ * Given that the protected content session is killed on suspend, the device
+ * is kept awake for the lifetime of a protected context, so the user should
+ * make sure to dispose of them once done.
+ * This flag can only be set at context creation time and, when set to true,
+ * must be preceded by an explicit setting of I915_CONTEXT_PARAM_RECOVERABLE
+ * to false. This flag can't be set to true in conjunction with setting the
+ * I915_CONTEXT_PARAM_BANNABLE flag to false. Creation example:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_gem_context_create_ext_setparam p_protected = {
+ * .base = {
+ * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ * },
+ * .param = {
+ * .param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
+ * .value = 1,
+ * }
+ * };
+ * struct drm_i915_gem_context_create_ext_setparam p_norecover = {
+ * .base = {
+ * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ * .next_extension = to_user_pointer(&p_protected),
+ * },
+ * .param = {
+ * .param = I915_CONTEXT_PARAM_RECOVERABLE,
+ * .value = 0,
+ * }
+ * };
+ * struct drm_i915_gem_context_create_ext create = {
+ * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+ * .extensions = to_user_pointer(&p_norecover);
+ * };
+ *
+ * ctx_id = gem_context_create_ext(drm_fd, &create);
+ *
+ * In addition to the normal failure cases, setting this flag during context
+ * creation can result in the following errors:
+ *
+ * -ENODEV: feature not available
+ * -EPERM: trying to mark a recoverable or not bannable context as protected
+ */
+#define I915_CONTEXT_PARAM_PROTECTED_CONTENT 0xd
/* Must be kept compact -- no holes and well documented */
__u64 value;
@@ -2050,6 +2106,135 @@ struct i915_context_engines_bond {
} __attribute__((packed)) name__
/**
+ * struct i915_context_engines_parallel_submit - Configure engine for
+ * parallel submission.
+ *
+ * Setup a slot in the context engine map to allow multiple BBs to be submitted
+ * in a single execbuf IOCTL. Those BBs will then be scheduled to run on the GPU
+ * in parallel. Multiple hardware contexts are created internally in the i915 to
+ * run these BBs. Once a slot is configured for N BBs, only N BBs can be
+ * submitted in each execbuf IOCTL, and this is implicit behavior: the user
+ * doesn't tell the execbuf IOCTL there are N BBs; the execbuf IOCTL knows how
+ * many BBs there are based on the slot's configuration. The N BBs are the last
+ * N buffer objects or first N if I915_EXEC_BATCH_FIRST is set.
+ *
+ * The default placement behavior is to create implicit bonds between each
+ * context if each context maps to more than 1 physical engine (e.g. context is
+ * a virtual engine). Also, we only allow contexts of the same engine class,
+ * and these contexts must be in logically contiguous order. Examples of the
+ * placement behavior are described below. Lastly, the default is to not allow
+ * BBs to be preempted mid-batch; rather, coordinated preemption points are
+ * inserted on all hardware contexts between each set of BBs. Flags could be
+ * added in the future
+ * to change both of these default behaviors.
+ *
+ * Returns -EINVAL if hardware context placement configuration is invalid or if
+ * the placement configuration isn't supported on the platform / submission
+ * interface.
+ * Returns -ENODEV if extension isn't supported on the platform / submission
+ * interface.
+ *
+ * .. code-block:: none
+ *
+ * Syntax used in the examples:
+ * CS[X] = generic engine of same class, logical instance X
+ * INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
+ *
+ * Example 1 pseudo code:
+ * set_engines(INVALID)
+ * set_parallel(engine_index=0, width=2, num_siblings=1,
+ * engines=CS[0],CS[1])
+ *
+ * Results in the following valid placement:
+ * CS[0], CS[1]
+ *
+ * Example 2 pseudo code:
+ * set_engines(INVALID)
+ * set_parallel(engine_index=0, width=2, num_siblings=2,
+ * engines=CS[0],CS[2],CS[1],CS[3])
+ *
+ * Results in the following valid placements:
+ * CS[0], CS[1]
+ * CS[2], CS[3]
+ *
+ * This can be thought of as two virtual engines, each containing two
+ * engines thereby making a 2D array. However, there are bonds tying the
+ * entries together and placing restrictions on how they can be scheduled.
+ * Specifically, the scheduler can choose only vertical columns from the 2D
+ * array. That is, CS[0] is bonded to CS[1] and CS[2] to CS[3]. So if the
+ * scheduler wants to submit to CS[0], it must also choose CS[1], and vice
+ * versa. Likewise, submitting to CS[2] requires also using CS[3].
+ * VE[0] = CS[0], CS[2]
+ * VE[1] = CS[1], CS[3]
+ *
+ * Example 3 pseudo code:
+ * set_engines(INVALID)
+ * set_parallel(engine_index=0, width=2, num_siblings=2,
+ * engines=CS[0],CS[1],CS[1],CS[3])
+ *
+ * Results in the following valid and invalid placements:
+ * CS[0], CS[1]
+ * CS[1], CS[3] - Not logically contiguous, return -EINVAL
+ */
+struct i915_context_engines_parallel_submit {
+ /**
+ * @base: base user extension.
+ */
+ struct i915_user_extension base;
+
+ /**
+ * @engine_index: slot for parallel engine
+ */
+ __u16 engine_index;
+
+ /**
+ * @width: number of contexts per parallel engine or in other words the
+ * number of batches in each submission
+ */
+ __u16 width;
+
+ /**
+ * @num_siblings: number of siblings per context or in other words the
+ * number of possible placements for each submission
+ */
+ __u16 num_siblings;
+
+ /**
+ * @mbz16: reserved for future use; must be zero
+ */
+ __u16 mbz16;
+
+ /**
+ * @flags: all undefined flags must be zero; no flags are currently defined
+ */
+ __u64 flags;
+
+ /**
+ * @mbz64: reserved for future use; must be zero
+ */
+ __u64 mbz64[3];
+
+ /**
+ * @engines: 2-d array of engine instances to configure parallel engine
+ *
+ * length = width (i) * num_siblings (j)
+ * index = j + i * num_siblings
+ */
+ struct i915_engine_class_instance engines[0];
+
+} __packed;
+
+#define I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(name__, N__) struct { \
+ struct i915_user_extension base; \
+ __u16 engine_index; \
+ __u16 width; \
+ __u16 num_siblings; \
+ __u16 mbz16; \
+ __u64 flags; \
+ __u64 mbz64[3]; \
+ struct i915_engine_class_instance engines[N__]; \
+} __attribute__((packed)) name__
+
+/**
* DOC: Context Engine Map uAPI
*
* Context engine map is a new way of addressing engines when submitting batch-
@@ -2108,6 +2293,7 @@ struct i915_context_param_engines {
__u64 extensions; /* linked chain of extension blocks, 0 terminates */
#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
+#define I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT 2 /* see i915_context_engines_parallel_submit */
struct i915_engine_class_instance engines[0];
} __attribute__((packed));
@@ -2726,14 +2912,20 @@ struct drm_i915_engine_info {
/** @flags: Engine flags. */
__u64 flags;
+#define I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE (1 << 0)
/** @capabilities: Capabilities of this engine. */
__u64 capabilities;
#define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0)
#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1)
+ /** @logical_instance: Logical instance of engine */
+ __u16 logical_instance;
+
/** @rsvd1: Reserved fields. */
- __u64 rsvd1[4];
+ __u16 rsvd1[3];
+ /** @rsvd2: Reserved fields. */
+ __u64 rsvd2[3];
};
/**
@@ -2979,8 +3171,12 @@ struct drm_i915_gem_create_ext {
*
* For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
* struct drm_i915_gem_create_ext_memory_regions.
+ *
+ * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
+ * struct drm_i915_gem_create_ext_protected_content.
*/
#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
+#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
__u64 extensions;
};
@@ -3038,6 +3234,50 @@ struct drm_i915_gem_create_ext_memory_regions {
__u64 regions;
};
+/**
+ * struct drm_i915_gem_create_ext_protected_content - The
+ * I915_OBJECT_PARAM_PROTECTED_CONTENT extension.
+ *
+ * If this extension is provided, buffer contents are expected to be protected
+ * by PXP encryption and require decryption for scan out and processing. This
+ * is only possible on platforms that have PXP enabled; in all other scenarios
+ * using this extension will cause the ioctl to fail and return -ENODEV. The
+ * flags parameter is reserved for future expansion and must currently be set
+ * to zero.
+ *
+ * The buffer contents are considered invalid after a PXP session teardown.
+ *
+ * The encryption is guaranteed to be processed correctly only if the object
+ * is submitted with a context created using the
+ * I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. This will also enable extra checks
+ * at submission time on the validity of the objects involved.
+ *
+ * Below is an example on how to create a protected object:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_gem_create_ext_protected_content protected_ext = {
+ * .base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT },
+ * .flags = 0,
+ * };
+ * struct drm_i915_gem_create_ext create_ext = {
+ * .size = PAGE_SIZE,
+ * .extensions = (uintptr_t)&protected_ext,
+ * };
+ *
+ * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
+ * if (err) ...
+ */
+struct drm_i915_gem_create_ext_protected_content {
+ /** @base: Extension link. See struct i915_user_extension. */
+ struct i915_user_extension base;
+ /** @flags: reserved for future usage, currently MBZ */
+ __u32 flags;
+};
+
+/* ID of the protected content session managed by i915 when PXP is active */
+#define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf
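To complement the object-creation example above, a hedged sketch of creating a context with I915_CONTEXT_PARAM_PROTECTED_CONTENT set, which the text says is needed for the encryption to be processed correctly; depending on the kernel, further context parameters (e.g. marking the context non-recoverable) may also be required, and error handling is elided:

.. code-block:: C

	struct drm_i915_gem_context_create_ext_setparam pxp_param = {
		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
		.param = {
			.param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
			.value = 1,
		},
	};
	struct drm_i915_gem_context_create_ext create = {
		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
		.extensions = (uintptr_t)&pxp_param,
	};

	int err = ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);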
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/mga_drm.h b/include/uapi/drm/mga_drm.h
index 8c4337548ab5..bb31567e66c0 100644
--- a/include/uapi/drm/mga_drm.h
+++ b/include/uapi/drm/mga_drm.h
@@ -279,20 +279,22 @@ typedef struct drm_mga_init {
unsigned long sarea_priv_offset;
- int chipset;
- int sgram;
+ __struct_group(/* no tag */, always32bit, /* no attrs */,
+ int chipset;
+ int sgram;
- unsigned int maccess;
+ unsigned int maccess;
- unsigned int fb_cpp;
- unsigned int front_offset, front_pitch;
- unsigned int back_offset, back_pitch;
+ unsigned int fb_cpp;
+ unsigned int front_offset, front_pitch;
+ unsigned int back_offset, back_pitch;
- unsigned int depth_cpp;
- unsigned int depth_offset, depth_pitch;
+ unsigned int depth_cpp;
+ unsigned int depth_offset, depth_pitch;
- unsigned int texture_offset[MGA_NR_TEX_HEAPS];
- unsigned int texture_size[MGA_NR_TEX_HEAPS];
+ unsigned int texture_offset[MGA_NR_TEX_HEAPS];
+ unsigned int texture_size[MGA_NR_TEX_HEAPS];
+ );
unsigned long fb_offset;
unsigned long mmio_offset;
diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h
index 4104f22fb3d3..3dfc0af8756a 100644
--- a/include/uapi/drm/v3d_drm.h
+++ b/include/uapi/drm/v3d_drm.h
@@ -58,6 +58,67 @@ extern "C" {
struct drm_v3d_perfmon_get_values)
#define DRM_V3D_SUBMIT_CL_FLUSH_CACHE 0x01
+#define DRM_V3D_SUBMIT_EXTENSION 0x02
+
+/* struct drm_v3d_extension - ioctl extensions
+ *
+ * Linked list of generic extensions, where id identifies which struct is
+ * pointed to by ext_data. The DRM_V3D_EXT_ID_* values are used in id to
+ * identify the extension type.
+ */
+struct drm_v3d_extension {
+ __u64 next;
+ __u32 id;
+#define DRM_V3D_EXT_ID_MULTI_SYNC 0x01
+ __u32 flags; /* mbz */
+};
+
+/* struct drm_v3d_sem - wait/signal semaphore
+ *
+ * For a binary semaphore, only the syncobj handle is used; the flags and
+ * point fields are ignored. The point field is defined for the timeline
+ * syncobj feature.
+ */
+struct drm_v3d_sem {
+ __u32 handle; /* syncobj */
+	/* reserved below, for future use */
+ __u32 flags;
+ __u64 point; /* for timeline sem support */
+ __u64 mbz[2]; /* must be zero, rsv */
+};
+
+/* Enum for each of the V3D queues. */
+enum v3d_queue {
+ V3D_BIN,
+ V3D_RENDER,
+ V3D_TFU,
+ V3D_CSD,
+ V3D_CACHE_CLEAN,
+};
+
+/**
+ * struct drm_v3d_multi_sync - ioctl extension to add support for multiple
+ * syncobjs on command submission.
+ *
+ * When an extension with the DRM_V3D_EXT_ID_MULTI_SYNC id is defined, it
+ * points to this extension, which defines wait and signal dependencies
+ * instead of the single in/out sync entries used when submitting commands.
+ * The wait_stage field determines the stage at which the wait dependencies
+ * are set.
+ */
+struct drm_v3d_multi_sync {
+ struct drm_v3d_extension base;
+ /* Array of wait and signal semaphores */
+ __u64 in_syncs;
+ __u64 out_syncs;
+
+ /* Number of entries */
+ __u32 in_sync_count;
+ __u32 out_sync_count;
+
+ /* set the stage (v3d_queue) to sync */
+ __u32 wait_stage;
+
+ __u32 pad; /* mbz */
+};
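A hedged sketch of chaining this extension into a CL submission, waiting on one syncobj before the bin stage; the syncobj handles are assumed to exist and error handling is elided:

.. code-block:: C

	struct drm_v3d_sem in = { .handle = in_syncobj };	/* assumed handle */
	struct drm_v3d_sem out = { .handle = out_syncobj };	/* assumed handle */
	struct drm_v3d_multi_sync ms = {
		.base = { .id = DRM_V3D_EXT_ID_MULTI_SYNC },
		.in_syncs = (uintptr_t)&in,
		.out_syncs = (uintptr_t)&out,
		.in_sync_count = 1,
		.out_sync_count = 1,
		.wait_stage = V3D_BIN,
	};
	struct drm_v3d_submit_cl submit = {
		/* ... CL and BO setup elided ... */
		.flags = DRM_V3D_SUBMIT_EXTENSION,
		.extensions = (uintptr_t)&ms,
	};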
/**
* struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D
@@ -135,12 +196,16 @@ struct drm_v3d_submit_cl {
/* Number of BO handles passed in (size is that times 4). */
__u32 bo_handle_count;
+ /* DRM_V3D_SUBMIT_* properties */
__u32 flags;
/* ID of the perfmon to attach to this job. 0 means no perfmon. */
__u32 perfmon_id;
__u32 pad;
+
+	/* Pointer to an array of ioctl extensions */
+ __u64 extensions;
};
/**
@@ -210,6 +275,7 @@ enum drm_v3d_param {
DRM_V3D_PARAM_SUPPORTS_CSD,
DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH,
DRM_V3D_PARAM_SUPPORTS_PERFMON,
+ DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT,
};
struct drm_v3d_get_param {
@@ -248,6 +314,11 @@ struct drm_v3d_submit_tfu {
__u32 in_sync;
/* Sync object to signal when the TFU job is done. */
__u32 out_sync;
+
+ __u32 flags;
+
+	/* Pointer to an array of ioctl extensions */
+ __u64 extensions;
};
/* Submits a compute shader for dispatch. This job will block on any
@@ -276,6 +347,13 @@ struct drm_v3d_submit_csd {
/* ID of the perfmon to attach to this job. 0 means no perfmon. */
__u32 perfmon_id;
+
+	/* Pointer to an array of ioctl extensions */
+ __u64 extensions;
+
+ __u32 flags;
+
+ __u32 pad;
};
enum {
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index b9ec26e9c646..a13e20cc66b4 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -47,12 +47,15 @@ extern "C" {
#define DRM_VIRTGPU_WAIT 0x08
#define DRM_VIRTGPU_GET_CAPS 0x09
#define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a
+#define DRM_VIRTGPU_CONTEXT_INIT 0x0b
#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
+#define VIRTGPU_EXECBUF_RING_IDX 0x04
#define VIRTGPU_EXECBUF_FLAGS (\
VIRTGPU_EXECBUF_FENCE_FD_IN |\
VIRTGPU_EXECBUF_FENCE_FD_OUT |\
+ VIRTGPU_EXECBUF_RING_IDX |\
0)
struct drm_virtgpu_map {
@@ -68,6 +71,8 @@ struct drm_virtgpu_execbuffer {
__u64 bo_handles;
__u32 num_bo_handles;
__s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
+ __u32 ring_idx; /* command ring index (see VIRTGPU_EXECBUF_RING_IDX) */
+ __u32 pad;
};
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
@@ -75,6 +80,8 @@ struct drm_virtgpu_execbuffer {
#define VIRTGPU_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
#define VIRTGPU_PARAM_HOST_VISIBLE 4 /* Host blob resources are mappable */
#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */
+#define VIRTGPU_PARAM_CONTEXT_INIT 6 /* DRM_VIRTGPU_CONTEXT_INIT */
+#define VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs 7 /* Bitmask of supported capability set ids */
struct drm_virtgpu_getparam {
__u64 param;
@@ -173,6 +180,22 @@ struct drm_virtgpu_resource_create_blob {
__u64 blob_id;
};
+#define VIRTGPU_CONTEXT_PARAM_CAPSET_ID 0x0001
+#define VIRTGPU_CONTEXT_PARAM_NUM_RINGS 0x0002
+#define VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK 0x0003
+struct drm_virtgpu_context_set_param {
+ __u64 param;
+ __u64 value;
+};
+
+struct drm_virtgpu_context_init {
+ __u32 num_params;
+ __u32 pad;
+
+ /* pointer to drm_virtgpu_context_set_param array */
+ __u64 ctx_set_params;
+};
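A hedged sketch of the new ioctl: initializing a context with a capability set and two command rings; the capset id value is hypothetical and error handling is elided:

.. code-block:: C

	__u64 capset_id = 1;	/* hypothetical capability set id */

	struct drm_virtgpu_context_set_param params[] = {
		{ .param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID, .value = capset_id },
		{ .param = VIRTGPU_CONTEXT_PARAM_NUM_RINGS, .value = 2 },
	};
	struct drm_virtgpu_context_init init = {
		.num_params = 2,
		.ctx_set_params = (uintptr_t)params,
	};

	ioctl(fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);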
+
#define DRM_IOCTL_VIRTGPU_MAP \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
@@ -212,6 +235,10 @@ struct drm_virtgpu_resource_create_blob {
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE_BLOB, \
struct drm_virtgpu_resource_create_blob)
+#define DRM_IOCTL_VIRTGPU_CONTEXT_INIT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_CONTEXT_INIT, \
+ struct drm_virtgpu_context_init)
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/linux/acrn.h b/include/uapi/linux/acrn.h
index 353b2a2e4536..ccf47ed92500 100644
--- a/include/uapi/linux/acrn.h
+++ b/include/uapi/linux/acrn.h
@@ -396,6 +396,7 @@ struct acrn_ptdev_irq {
/* Type of PCI device assignment */
#define ACRN_PTDEV_QUIRK_ASSIGN (1U << 0)
+#define ACRN_MMIODEV_RES_NUM 3
#define ACRN_PCI_NUM_BARS 6
/**
* struct acrn_pcidev - Info for assigning or de-assigning a PCI device
@@ -418,6 +419,67 @@ struct acrn_pcidev {
};
/**
+ * struct acrn_mmiodev - Info for assigning or de-assigning an MMIO device
+ * @name: Name of the MMIO device.
+ * @res[].user_vm_pa:	Physical address in the User VM of the MMIO region
+ *			for the MMIO device.
+ * @res[].service_vm_pa: Physical address in the Service VM of the MMIO
+ *			region for the MMIO device.
+ * @res[].size: Size of the MMIO region for the MMIO device.
+ * @res[].mem_type: Memory type of the MMIO region for the MMIO
+ * device.
+ *
+ * This structure will be passed to hypervisor directly.
+ */
+struct acrn_mmiodev {
+ __u8 name[8];
+ struct {
+ __u64 user_vm_pa;
+ __u64 service_vm_pa;
+ __u64 size;
+ __u64 mem_type;
+ } res[ACRN_MMIODEV_RES_NUM];
+};
+
+/**
+ * struct acrn_vdev - Info for creating or destroying a virtual device
+ * @id: Union of identifier of the virtual device
+ * @id.value: Raw data of the identifier
+ * @id.fields.vendor: Vendor id of the virtual PCI device
+ * @id.fields.device: Device id of the virtual PCI device
+ * @id.fields.legacy_id: ID of the virtual device if not a PCI device
+ * @slot: Virtual Bus/Device/Function of the virtual
+ * device
+ * @io_base: IO resource base address of the virtual device
+ * @io_size: IO resource size of the virtual device
+ * @args: Arguments for the virtual device creation
+ *
+ * The created virtual device can be a PCI device or a legacy device (e.g.
+ * a virtual UART controller) and it is emulated by the hypervisor. This
+ * structure will be passed to hypervisor directly.
+ */
+struct acrn_vdev {
+ /*
+	 * The identifier of the device. The low 32 bits represent the vendor
+	 * ID and device ID of a PCI device, and the high 32 bits represent
+	 * the device number of a legacy device.
+ */
+ union {
+ __u64 value;
+ struct {
+ __le16 vendor;
+ __le16 device;
+ __le32 legacy_id;
+ } fields;
+ } id;
+
+ __u64 slot;
+ __u32 io_addr[ACRN_PCI_NUM_BARS];
+ __u32 io_size[ACRN_PCI_NUM_BARS];
+ __u8 args[128];
+};
+
+/**
* struct acrn_msi_entry - Info for injecting a MSI interrupt to a VM
* @msi_addr: MSI addr[19:12] with dest vCPU ID
* @msi_data: MSI data[7:0] with vector
@@ -568,6 +630,14 @@ struct acrn_irqfd {
_IOW(ACRN_IOCTL_TYPE, 0x55, struct acrn_pcidev)
#define ACRN_IOCTL_DEASSIGN_PCIDEV \
_IOW(ACRN_IOCTL_TYPE, 0x56, struct acrn_pcidev)
+#define ACRN_IOCTL_ASSIGN_MMIODEV \
+ _IOW(ACRN_IOCTL_TYPE, 0x57, struct acrn_mmiodev)
+#define ACRN_IOCTL_DEASSIGN_MMIODEV \
+ _IOW(ACRN_IOCTL_TYPE, 0x58, struct acrn_mmiodev)
+#define ACRN_IOCTL_CREATE_VDEV \
+ _IOW(ACRN_IOCTL_TYPE, 0x59, struct acrn_vdev)
+#define ACRN_IOCTL_DESTROY_VDEV \
+ _IOW(ACRN_IOCTL_TYPE, 0x5A, struct acrn_vdev)
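As a hedged sketch of the new MMIO-device ioctls (the device node, name and addresses here are hypothetical; error handling is elided):

.. code-block:: C

	struct acrn_mmiodev dev = {
		.name = "mmiodev",
		.res[0] = {
			.user_vm_pa    = 0xfe040000,	/* hypothetical */
			.service_vm_pa = 0xfe040000,	/* hypothetical */
			.size          = 0x1000,
		},
	};

	int hsm_fd = open("/dev/acrn_hsm", O_RDWR);	/* one open() per VM */
	ioctl(hsm_fd, ACRN_IOCTL_ASSIGN_MMIODEV, &dev);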
#define ACRN_IOCTL_PM_GET_CPU_STATE \
_IOWR(ACRN_IOCTL_TYPE, 0x60, __u64)
diff --git a/include/uapi/linux/amt.h b/include/uapi/linux/amt.h
new file mode 100644
index 000000000000..2dccff417195
--- /dev/null
+++ b/include/uapi/linux/amt.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2021 Taehee Yoo <ap420073@gmail.com>
+ */
+#ifndef _UAPI_AMT_H_
+#define _UAPI_AMT_H_
+
+enum ifla_amt_mode {
+	/* The AMT interface works in Gateway mode.
+ * The Gateway mode encapsulates IGMP/MLD traffic and decapsulates
+ * multicast traffic.
+ */
+ AMT_MODE_GATEWAY = 0,
+	/* The AMT interface works in Relay mode.
+ * The Relay mode encapsulates multicast traffic and decapsulates
+ * IGMP/MLD traffic.
+ */
+ AMT_MODE_RELAY,
+ __AMT_MODE_MAX,
+};
+
+#define AMT_MODE_MAX (__AMT_MODE_MAX - 1)
+
+enum {
+ IFLA_AMT_UNSPEC,
+	/* This attribute specifies the mode, either Gateway or Relay. */
+ IFLA_AMT_MODE,
+	/* This attribute specifies the Relay port.
+	 * If the AMT interface is created in Gateway mode, this attribute is
+	 * used to specify the relay (remote) port.
+	 * If the AMT interface is created in Relay mode, this attribute is
+	 * used as the local port.
+ */
+ IFLA_AMT_RELAY_PORT,
+	/* This attribute specifies the Gateway port.
+	 * If the AMT interface is created in Gateway mode, this attribute is
+	 * used as the local port.
+	 * If the AMT interface is created in Relay mode, this attribute is
+	 * not used.
+ */
+ IFLA_AMT_GATEWAY_PORT,
+	/* This attribute specifies the underlying physical device. */
+ IFLA_AMT_LINK,
+	/* This attribute specifies the local IP address. */
+ IFLA_AMT_LOCAL_IP,
+	/* This attribute specifies the Relay IP address,
+	 * so it is not used by the Relay itself.
+ */
+ IFLA_AMT_REMOTE_IP,
+	/* This attribute specifies the Discovery IP address.
+	 * When the Gateway starts, it sends a discovery message to find the
+	 * Relay's IP address.
+	 * This is not used by the Relay.
+ */
+ IFLA_AMT_DISCOVERY_IP,
+	/* This attribute specifies the maximum number of tunnels. */
+ IFLA_AMT_MAX_TUNNELS,
+ __IFLA_AMT_MAX,
+};
+
+#define IFLA_AMT_MAX (__IFLA_AMT_MAX - 1)
+
+#endif /* _UAPI_AMT_H_ */
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index daa481729e9b..9176a095fefc 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -118,6 +118,10 @@
#define AUDIT_TIME_ADJNTPVAL 1333 /* NTP value adjustment */
#define AUDIT_BPF 1334 /* BPF subsystem */
#define AUDIT_EVENT_LISTENER 1335 /* Task joined multicast read socket */
+#define AUDIT_URINGOP 1336 /* io_uring operation */
+#define AUDIT_OPENAT2		1337	/* Record showing openat2 open_how args */
+#define AUDIT_DM_CTRL 1338 /* Device Mapper target control */
+#define AUDIT_DM_EVENT 1339 /* Device Mapper events */
#define AUDIT_AVC 1400 /* SE Linux avc denial or grant */
#define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */
@@ -166,8 +170,9 @@
#define AUDIT_FILTER_EXCLUDE 0x05 /* Apply rule before record creation */
#define AUDIT_FILTER_TYPE AUDIT_FILTER_EXCLUDE /* obsolete misleading naming */
#define AUDIT_FILTER_FS 0x06 /* Apply rule at __audit_inode_child */
+#define AUDIT_FILTER_URING_EXIT 0x07 /* Apply rule at io_uring op exit */
-#define AUDIT_NR_FILTERS 7
+#define AUDIT_NR_FILTERS 8
#define AUDIT_FILTER_PREPEND 0x10 /* Prepend to front of list */
diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
deleted file mode 100644
index cf7399f03b71..000000000000
--- a/include/uapi/linux/bcache.h
+++ /dev/null
@@ -1,445 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _LINUX_BCACHE_H
-#define _LINUX_BCACHE_H
-
-/*
- * Bcache on disk data structures
- */
-
-#include <linux/types.h>
-
-#define BITMASK(name, type, field, offset, size) \
-static inline __u64 name(const type *k) \
-{ return (k->field >> offset) & ~(~0ULL << size); } \
- \
-static inline void SET_##name(type *k, __u64 v) \
-{ \
- k->field &= ~(~(~0ULL << size) << offset); \
- k->field |= (v & ~(~0ULL << size)) << offset; \
-}
-
-/* Btree keys - all units are in sectors */
-
-struct bkey {
- __u64 high;
- __u64 low;
- __u64 ptr[];
-};
-
-#define KEY_FIELD(name, field, offset, size) \
- BITMASK(name, struct bkey, field, offset, size)
-
-#define PTR_FIELD(name, offset, size) \
-static inline __u64 name(const struct bkey *k, unsigned int i) \
-{ return (k->ptr[i] >> offset) & ~(~0ULL << size); } \
- \
-static inline void SET_##name(struct bkey *k, unsigned int i, __u64 v) \
-{ \
- k->ptr[i] &= ~(~(~0ULL << size) << offset); \
- k->ptr[i] |= (v & ~(~0ULL << size)) << offset; \
-}
-
-#define KEY_SIZE_BITS 16
-#define KEY_MAX_U64S 8
-
-KEY_FIELD(KEY_PTRS, high, 60, 3)
-KEY_FIELD(HEADER_SIZE, high, 58, 2)
-KEY_FIELD(KEY_CSUM, high, 56, 2)
-KEY_FIELD(KEY_PINNED, high, 55, 1)
-KEY_FIELD(KEY_DIRTY, high, 36, 1)
-
-KEY_FIELD(KEY_SIZE, high, 20, KEY_SIZE_BITS)
-KEY_FIELD(KEY_INODE, high, 0, 20)
-
-/* Next time I change the on disk format, KEY_OFFSET() won't be 64 bits */
-
-static inline __u64 KEY_OFFSET(const struct bkey *k)
-{
- return k->low;
-}
-
-static inline void SET_KEY_OFFSET(struct bkey *k, __u64 v)
-{
- k->low = v;
-}
-
-/*
- * The high bit being set is a relic from when we used it to do binary
- * searches - it told you where a key started. It's not used anymore,
- * and can probably be safely dropped.
- */
-#define KEY(inode, offset, size) \
-((struct bkey) { \
- .high = (1ULL << 63) | ((__u64) (size) << 20) | (inode), \
- .low = (offset) \
-})
-
-#define ZERO_KEY KEY(0, 0, 0)
-
-#define MAX_KEY_INODE (~(~0 << 20))
-#define MAX_KEY_OFFSET (~0ULL >> 1)
-#define MAX_KEY KEY(MAX_KEY_INODE, MAX_KEY_OFFSET, 0)
-
-#define KEY_START(k) (KEY_OFFSET(k) - KEY_SIZE(k))
-#define START_KEY(k) KEY(KEY_INODE(k), KEY_START(k), 0)
-
-#define PTR_DEV_BITS 12
-
-PTR_FIELD(PTR_DEV, 51, PTR_DEV_BITS)
-PTR_FIELD(PTR_OFFSET, 8, 43)
-PTR_FIELD(PTR_GEN, 0, 8)
-
-#define PTR_CHECK_DEV ((1 << PTR_DEV_BITS) - 1)
-
-#define MAKE_PTR(gen, offset, dev) \
- ((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen)
-
-/* Bkey utility code */
-
-static inline unsigned long bkey_u64s(const struct bkey *k)
-{
- return (sizeof(struct bkey) / sizeof(__u64)) + KEY_PTRS(k);
-}
-
-static inline unsigned long bkey_bytes(const struct bkey *k)
-{
- return bkey_u64s(k) * sizeof(__u64);
-}
-
-#define bkey_copy(_dest, _src) memcpy(_dest, _src, bkey_bytes(_src))
-
-static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
-{
- SET_KEY_INODE(dest, KEY_INODE(src));
- SET_KEY_OFFSET(dest, KEY_OFFSET(src));
-}
-
-static inline struct bkey *bkey_next(const struct bkey *k)
-{
- __u64 *d = (void *) k;
-
- return (struct bkey *) (d + bkey_u64s(k));
-}
-
-static inline struct bkey *bkey_idx(const struct bkey *k, unsigned int nr_keys)
-{
- __u64 *d = (void *) k;
-
- return (struct bkey *) (d + nr_keys);
-}
-/* Enough for a key with 6 pointers */
-#define BKEY_PAD 8
-
-#define BKEY_PADDED(key) \
- union { struct bkey key; __u64 key ## _pad[BKEY_PAD]; }
-
-/* Superblock */
-
-/* Version 0: Cache device
- * Version 1: Backing device
- * Version 2: Seed pointer into btree node checksum
- * Version 3: Cache device with new UUID format
- * Version 4: Backing device with data offset
- */
-#define BCACHE_SB_VERSION_CDEV 0
-#define BCACHE_SB_VERSION_BDEV 1
-#define BCACHE_SB_VERSION_CDEV_WITH_UUID 3
-#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET 4
-#define BCACHE_SB_VERSION_CDEV_WITH_FEATURES 5
-#define BCACHE_SB_VERSION_BDEV_WITH_FEATURES 6
-#define BCACHE_SB_MAX_VERSION 6
-
-#define SB_SECTOR 8
-#define SB_OFFSET (SB_SECTOR << SECTOR_SHIFT)
-#define SB_SIZE 4096
-#define SB_LABEL_SIZE 32
-#define SB_JOURNAL_BUCKETS 256U
-/* SB_JOURNAL_BUCKETS must be divisible by BITS_PER_LONG */
-#define MAX_CACHES_PER_SET 8
-
-#define BDEV_DATA_START_DEFAULT 16 /* sectors */
-
-struct cache_sb_disk {
- __le64 csum;
- __le64 offset; /* sector where this sb was written */
- __le64 version;
-
- __u8 magic[16];
-
- __u8 uuid[16];
- union {
- __u8 set_uuid[16];
- __le64 set_magic;
- };
- __u8 label[SB_LABEL_SIZE];
-
- __le64 flags;
- __le64 seq;
-
- __le64 feature_compat;
- __le64 feature_incompat;
- __le64 feature_ro_compat;
-
- __le64 pad[5];
-
- union {
- struct {
- /* Cache devices */
- __le64 nbuckets; /* device size */
-
- __le16 block_size; /* sectors */
- __le16 bucket_size; /* sectors */
-
- __le16 nr_in_set;
- __le16 nr_this_dev;
- };
- struct {
- /* Backing devices */
- __le64 data_offset;
-
- /*
- * block_size from the cache device section is still used by
- * backing devices, so don't add anything here until we fix
- * things to not need it for backing devices anymore
- */
- };
- };
-
- __le32 last_mount; /* time overflow in y2106 */
-
- __le16 first_bucket;
- union {
- __le16 njournal_buckets;
- __le16 keys;
- };
- __le64 d[SB_JOURNAL_BUCKETS]; /* journal buckets */
- __le16 obso_bucket_size_hi; /* obsoleted */
-};
-
-/*
- * This is for in-memory bcache super block.
- * NOTE: cache_sb is NOT exactly mapping to cache_sb_disk, the member
- * size, ordering and even whole struct size may be different
- * from cache_sb_disk.
- */
-struct cache_sb {
- __u64 offset; /* sector where this sb was written */
- __u64 version;
-
- __u8 magic[16];
-
- __u8 uuid[16];
- union {
- __u8 set_uuid[16];
- __u64 set_magic;
- };
- __u8 label[SB_LABEL_SIZE];
-
- __u64 flags;
- __u64 seq;
-
- __u64 feature_compat;
- __u64 feature_incompat;
- __u64 feature_ro_compat;
-
- union {
- struct {
- /* Cache devices */
- __u64 nbuckets; /* device size */
-
- __u16 block_size; /* sectors */
- __u16 nr_in_set;
- __u16 nr_this_dev;
- __u32 bucket_size; /* sectors */
- };
- struct {
- /* Backing devices */
- __u64 data_offset;
-
- /*
- * block_size from the cache device section is still used by
- * backing devices, so don't add anything here until we fix
- * things to not need it for backing devices anymore
- */
- };
- };
-
- __u32 last_mount; /* time overflow in y2106 */
-
- __u16 first_bucket;
- union {
- __u16 njournal_buckets;
- __u16 keys;
- };
- __u64 d[SB_JOURNAL_BUCKETS]; /* journal buckets */
-};
-
-static inline _Bool SB_IS_BDEV(const struct cache_sb *sb)
-{
- return sb->version == BCACHE_SB_VERSION_BDEV
- || sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET
- || sb->version == BCACHE_SB_VERSION_BDEV_WITH_FEATURES;
-}
-
-BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1);
-BITMASK(CACHE_DISCARD, struct cache_sb, flags, 1, 1);
-BITMASK(CACHE_REPLACEMENT, struct cache_sb, flags, 2, 3);
-#define CACHE_REPLACEMENT_LRU 0U
-#define CACHE_REPLACEMENT_FIFO 1U
-#define CACHE_REPLACEMENT_RANDOM 2U
-
-BITMASK(BDEV_CACHE_MODE, struct cache_sb, flags, 0, 4);
-#define CACHE_MODE_WRITETHROUGH 0U
-#define CACHE_MODE_WRITEBACK 1U
-#define CACHE_MODE_WRITEAROUND 2U
-#define CACHE_MODE_NONE 3U
-BITMASK(BDEV_STATE, struct cache_sb, flags, 61, 2);
-#define BDEV_STATE_NONE 0U
-#define BDEV_STATE_CLEAN 1U
-#define BDEV_STATE_DIRTY 2U
-#define BDEV_STATE_STALE 3U
-
-/*
- * Magic numbers
- *
- * The various other data structures have their own magic numbers, which are
- * xored with the first part of the cache set's UUID
- */
-
-#define JSET_MAGIC 0x245235c1a3625032ULL
-#define PSET_MAGIC 0x6750e15f87337f91ULL
-#define BSET_MAGIC 0x90135c78b99e07f5ULL
-
-static inline __u64 jset_magic(struct cache_sb *sb)
-{
- return sb->set_magic ^ JSET_MAGIC;
-}
-
-static inline __u64 pset_magic(struct cache_sb *sb)
-{
- return sb->set_magic ^ PSET_MAGIC;
-}
-
-static inline __u64 bset_magic(struct cache_sb *sb)
-{
- return sb->set_magic ^ BSET_MAGIC;
-}
-
-/*
- * Journal
- *
- * On disk format for a journal entry:
- * seq is monotonically increasing; every journal entry has its own unique
- * sequence number.
- *
- * last_seq is the oldest journal entry that still has keys the btree hasn't
- * flushed to disk yet.
- *
- * version is for on disk format changes.
- */
-
-#define BCACHE_JSET_VERSION_UUIDv1 1
-#define BCACHE_JSET_VERSION_UUID 1 /* Always latest UUID format */
-#define BCACHE_JSET_VERSION 1
-
-struct jset {
- __u64 csum;
- __u64 magic;
- __u64 seq;
- __u32 version;
- __u32 keys;
-
- __u64 last_seq;
-
- BKEY_PADDED(uuid_bucket);
- BKEY_PADDED(btree_root);
- __u16 btree_level;
- __u16 pad[3];
-
- __u64 prio_bucket[MAX_CACHES_PER_SET];
-
- union {
- struct bkey start[0];
- __u64 d[0];
- };
-};
-
-/* Bucket prios/gens */
-
-struct prio_set {
- __u64 csum;
- __u64 magic;
- __u64 seq;
- __u32 version;
- __u32 pad;
-
- __u64 next_bucket;
-
- struct bucket_disk {
- __u16 prio;
- __u8 gen;
- } __attribute((packed)) data[];
-};
-
-/* UUIDS - per backing device/flash only volume metadata */
-
-struct uuid_entry {
- union {
- struct {
- __u8 uuid[16];
- __u8 label[32];
- __u32 first_reg; /* time overflow in y2106 */
- __u32 last_reg;
- __u32 invalidated;
-
- __u32 flags;
- /* Size of flash only volumes */
- __u64 sectors;
- };
-
- __u8 pad[128];
- };
-};
-
-BITMASK(UUID_FLASH_ONLY, struct uuid_entry, flags, 0, 1);
-
-/* Btree nodes */
-
-/* Version 1: Seed pointer into btree node checksum
- */
-#define BCACHE_BSET_CSUM 1
-#define BCACHE_BSET_VERSION 1
-
-/*
- * Btree nodes
- *
- * On disk a btree node is a list/log of these; within each set the keys are
- * sorted
- */
-struct bset {
- __u64 csum;
- __u64 magic;
- __u64 seq;
- __u32 version;
- __u32 keys;
-
- union {
- struct bkey start[0];
- __u64 d[0];
- };
-};
-
-/* OBSOLETE */
-
-/* UUIDS - per backing device/flash only volume metadata */
-
-struct uuid_entry_v0 {
- __u8 uuid[16];
- __u8 label[32];
- __u32 first_reg;
- __u32 last_reg;
- __u32 invalidated;
- __u32 pad;
-};
-
-#endif /* _LINUX_BCACHE_H */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 791f31dd0abe..ba5af15e25f5 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -906,6 +906,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_RINGBUF,
BPF_MAP_TYPE_INODE_STORAGE,
BPF_MAP_TYPE_TASK_STORAGE,
+ BPF_MAP_TYPE_BLOOM_FILTER,
};
/* Note that tracing related programs such as
@@ -1274,6 +1275,13 @@ union bpf_attr {
* struct stored as the
* map value
*/
+ /* Any per-map-type extra fields
+ *
+ * BPF_MAP_TYPE_BLOOM_FILTER - the lowest 4 bits indicate the
+ * number of hash functions (if 0, the bloom filter will default
+ * to using 5 hash functions).
+ */
+ __u64 map_extra;
};
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -1629,7 +1637,7 @@ union bpf_attr {
* u32 bpf_get_smp_processor_id(void)
* Description
* Get the SMP (symmetric multiprocessing) processor id. Note that
- * all programs run with preemption disabled, which means that the
+ * all programs run with migration disabled, which means that the
* SMP processor id is stable during all the execution of the
* program.
* Return
@@ -4046,7 +4054,7 @@ union bpf_attr {
* arguments. The *data* are a **u64** array and corresponding format string
* values are stored in the array. For strings and pointers where pointees
* are accessed, only the pointer values are stored in the *data* array.
- * The *data_len* is the size of *data* in bytes.
+ * The *data_len* is the size of *data* in bytes - must be a multiple of 8.
*
* Formats **%s**, **%p{i,I}{4,6}** requires to read kernel memory.
* Reading kernel memory may fail due to either invalid address or
@@ -4751,7 +4759,8 @@ union bpf_attr {
* Each format specifier in **fmt** corresponds to one u64 element
* in the **data** array. For strings and pointers where pointees
* are accessed, only the pointer values are stored in the *data*
- * array. The *data_len* is the size of *data* in bytes.
+ * array. The *data_len* is the size of *data* in bytes - must be
+ * a multiple of 8.
*
* Formats **%s** and **%p{i,I}{4,6}** require to read kernel
* memory. Reading kernel memory may fail due to either invalid
@@ -4877,6 +4886,58 @@ union bpf_attr {
* Get the struct pt_regs associated with **task**.
* Return
* A pointer to struct pt_regs.
+ *
+ * long bpf_get_branch_snapshot(void *entries, u32 size, u64 flags)
+ * Description
+ * Get branch trace from hardware engines like Intel LBR. The
+ * hardware engine is stopped shortly after the helper is
+ *		called. Therefore, the user needs to filter branch entries
+ * based on the actual use case. To capture branch trace
+ * before the trigger point of the BPF program, the helper
+ * should be called at the beginning of the BPF program.
+ *
+ * The data is stored as struct perf_branch_entry into output
+ * buffer *entries*. *size* is the size of *entries* in bytes.
+ * *flags* is reserved for now and must be zero.
+ *
+ * Return
+ *		On success, number of bytes written to *entries*. On error, a
+ * negative value.
+ *
+ * **-EINVAL** if *flags* is not zero.
+ *
+ * **-ENOENT** if architecture does not support branch records.
+ *
+ * long bpf_trace_vprintk(const char *fmt, u32 fmt_size, const void *data, u32 data_len)
+ * Description
+ * Behaves like **bpf_trace_printk**\ () helper, but takes an array of u64
+ * to format and can handle more format args as a result.
+ *
+ * Arguments are to be used as in **bpf_seq_printf**\ () helper.
+ * Return
+ * The number of bytes written to the buffer, or a negative error
+ * in case of failure.
+ *
+ * struct unix_sock *bpf_skc_to_unix_sock(void *sk)
+ * Description
+ * Dynamically cast a *sk* pointer to a *unix_sock* pointer.
+ * Return
+ * *sk* if casting is valid, or **NULL** otherwise.
+ *
+ * long bpf_kallsyms_lookup_name(const char *name, int name_sz, int flags, u64 *res)
+ * Description
+ * Get the address of a kernel symbol, returned in *res*. *res* is
+ * set to 0 if the symbol is not found.
+ * Return
+ * On success, zero. On error, a negative value.
+ *
+ * **-EINVAL** if *flags* is not zero.
+ *
+ * **-EINVAL** if string *name* is not the same size as *name_sz*.
+ *
+ * **-ENOENT** if symbol is not found.
+ *
+ * **-EPERM** if caller does not have permission to obtain kernel address.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -5055,6 +5116,10 @@ union bpf_attr {
FN(get_func_ip), \
FN(get_attach_cookie), \
FN(task_pt_regs), \
+ FN(get_branch_snapshot), \
+ FN(trace_vprintk), \
+ FN(skc_to_unix_sock), \
+ FN(kallsyms_lookup_name), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
@@ -5284,6 +5349,8 @@ struct __sk_buff {
__u32 gso_segs;
__bpf_md_ptr(struct bpf_sock *, sk);
__u32 gso_size;
+ __u32 :32; /* Padding, future use. */
+ __u64 hwtstamp;
};
struct bpf_tunnel_key {
@@ -5577,6 +5644,7 @@ struct bpf_prog_info {
__u64 run_time_ns;
__u64 run_cnt;
__u64 recursion_misses;
+ __u32 verified_insns;
} __attribute__((aligned(8)));
struct bpf_map_info {
@@ -5594,6 +5662,8 @@ struct bpf_map_info {
__u32 btf_id;
__u32 btf_key_type_id;
__u32 btf_value_type_id;
+ __u32 :32; /* alignment pad */
+ __u64 map_extra;
} __attribute__((aligned(8)));
struct bpf_btf_info {
diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h
index d27b1708efe9..deb12f755f0f 100644
--- a/include/uapi/linux/btf.h
+++ b/include/uapi/linux/btf.h
@@ -43,7 +43,7 @@ struct btf_type {
* "size" tells the size of the type it is describing.
*
* "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
- * FUNC, FUNC_PROTO and VAR.
+ * FUNC, FUNC_PROTO, VAR and DECL_TAG.
* "type" is a type_id referring to another type.
*/
union {
@@ -56,25 +56,29 @@ struct btf_type {
#define BTF_INFO_VLEN(info) ((info) & 0xffff)
#define BTF_INFO_KFLAG(info) ((info) >> 31)
-#define BTF_KIND_UNKN 0 /* Unknown */
-#define BTF_KIND_INT 1 /* Integer */
-#define BTF_KIND_PTR 2 /* Pointer */
-#define BTF_KIND_ARRAY 3 /* Array */
-#define BTF_KIND_STRUCT 4 /* Struct */
-#define BTF_KIND_UNION 5 /* Union */
-#define BTF_KIND_ENUM 6 /* Enumeration */
-#define BTF_KIND_FWD 7 /* Forward */
-#define BTF_KIND_TYPEDEF 8 /* Typedef */
-#define BTF_KIND_VOLATILE 9 /* Volatile */
-#define BTF_KIND_CONST 10 /* Const */
-#define BTF_KIND_RESTRICT 11 /* Restrict */
-#define BTF_KIND_FUNC 12 /* Function */
-#define BTF_KIND_FUNC_PROTO 13 /* Function Proto */
-#define BTF_KIND_VAR 14 /* Variable */
-#define BTF_KIND_DATASEC 15 /* Section */
-#define BTF_KIND_FLOAT 16 /* Floating point */
-#define BTF_KIND_MAX BTF_KIND_FLOAT
-#define NR_BTF_KINDS (BTF_KIND_MAX + 1)
+enum {
+ BTF_KIND_UNKN = 0, /* Unknown */
+ BTF_KIND_INT = 1, /* Integer */
+ BTF_KIND_PTR = 2, /* Pointer */
+ BTF_KIND_ARRAY = 3, /* Array */
+ BTF_KIND_STRUCT = 4, /* Struct */
+ BTF_KIND_UNION = 5, /* Union */
+ BTF_KIND_ENUM = 6, /* Enumeration */
+ BTF_KIND_FWD = 7, /* Forward */
+ BTF_KIND_TYPEDEF = 8, /* Typedef */
+ BTF_KIND_VOLATILE = 9, /* Volatile */
+ BTF_KIND_CONST = 10, /* Const */
+ BTF_KIND_RESTRICT = 11, /* Restrict */
+ BTF_KIND_FUNC = 12, /* Function */
+ BTF_KIND_FUNC_PROTO = 13, /* Function Proto */
+ BTF_KIND_VAR = 14, /* Variable */
+ BTF_KIND_DATASEC = 15, /* Section */
+ BTF_KIND_FLOAT = 16, /* Floating point */
+ BTF_KIND_DECL_TAG = 17, /* Decl Tag */
+
+ NR_BTF_KINDS,
+ BTF_KIND_MAX = NR_BTF_KINDS - 1,
+};
/* For some specific BTF_KIND, "struct btf_type" is immediately
* followed by extra data.
@@ -170,4 +174,15 @@ struct btf_var_secinfo {
__u32 size;
};
+/* BTF_KIND_DECL_TAG is followed by a single "struct btf_decl_tag" to describe
+ * additional information related to the tag applied location.
+ * If component_idx == -1, the tag is applied to a struct, union,
+ * variable or function. Otherwise, it is applied to a struct/union
+ * member or a func argument, and component_idx indicates which member
+ * or argument (0 ... vlen-1).
+ */
+struct btf_decl_tag {
+ __s32 component_idx;
+};
+
#endif /* _UAPI__LINUX_BTF_H__ */
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index d7d3cfead056..738619994e26 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -771,10 +771,16 @@ struct btrfs_ioctl_received_subvol_args {
*/
#define BTRFS_SEND_FLAG_OMIT_END_CMD 0x4
+/*
+ * Read the protocol version in the structure
+ */
+#define BTRFS_SEND_FLAG_VERSION 0x8
+
#define BTRFS_SEND_FLAG_MASK \
(BTRFS_SEND_FLAG_NO_FILE_DATA | \
BTRFS_SEND_FLAG_OMIT_STREAM_HEADER | \
- BTRFS_SEND_FLAG_OMIT_END_CMD)
+ BTRFS_SEND_FLAG_OMIT_END_CMD | \
+ BTRFS_SEND_FLAG_VERSION)
struct btrfs_ioctl_send_args {
__s64 send_fd; /* in */
@@ -782,7 +788,8 @@ struct btrfs_ioctl_send_args {
__u64 __user *clone_sources; /* in */
__u64 parent_root; /* in */
__u64 flags; /* in */
- __u64 reserved[4]; /* in */
+ __u32 version; /* in */
+ __u8 reserved[28]; /* in */
};
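A hedged sketch of requesting a specific send-stream protocol version through the reworked argument struct; the fds and the version value are assumptions of this example:

.. code-block:: C

	struct btrfs_ioctl_send_args args = {
		.send_fd = pipe_wfd,			/* stream written here */
		.flags   = BTRFS_SEND_FLAG_VERSION,
		.version = 2,				/* hypothetical version */
	};

	ioctl(subvol_fd, BTRFS_IOC_SEND, &args);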
/*
diff --git a/include/uapi/linux/can/netlink.h b/include/uapi/linux/can/netlink.h
index f730d443b918..75b85c60efb2 100644
--- a/include/uapi/linux/can/netlink.h
+++ b/include/uapi/linux/can/netlink.h
@@ -101,6 +101,8 @@ struct can_ctrlmode {
#define CAN_CTRLMODE_PRESUME_ACK 0x40 /* Ignore missing CAN ACKs */
#define CAN_CTRLMODE_FD_NON_ISO 0x80 /* CAN FD in non-ISO mode */
#define CAN_CTRLMODE_CC_LEN8_DLC 0x100 /* Classic CAN DLC option */
+#define CAN_CTRLMODE_TDC_AUTO		0x200	/* CAN transceiver automatically calculates TDCV */
+#define CAN_CTRLMODE_TDC_MANUAL 0x400 /* TDCV is manually set up by user */
/*
* CAN device statistics
@@ -134,10 +136,35 @@ enum {
IFLA_CAN_BITRATE_CONST,
IFLA_CAN_DATA_BITRATE_CONST,
IFLA_CAN_BITRATE_MAX,
- __IFLA_CAN_MAX
+ IFLA_CAN_TDC,
+
+ /* add new constants above here */
+ __IFLA_CAN_MAX,
+ IFLA_CAN_MAX = __IFLA_CAN_MAX - 1
};
-#define IFLA_CAN_MAX (__IFLA_CAN_MAX - 1)
+/*
+ * CAN FD Transmitter Delay Compensation (TDC)
+ *
+ * Please refer to struct can_tdc_const and can_tdc in
+ * include/linux/can/bittiming.h for further details.
+ */
+enum {
+ IFLA_CAN_TDC_UNSPEC,
+ IFLA_CAN_TDC_TDCV_MIN, /* u32 */
+ IFLA_CAN_TDC_TDCV_MAX, /* u32 */
+ IFLA_CAN_TDC_TDCO_MIN, /* u32 */
+ IFLA_CAN_TDC_TDCO_MAX, /* u32 */
+ IFLA_CAN_TDC_TDCF_MIN, /* u32 */
+ IFLA_CAN_TDC_TDCF_MAX, /* u32 */
+ IFLA_CAN_TDC_TDCV, /* u32 */
+ IFLA_CAN_TDC_TDCO, /* u32 */
+ IFLA_CAN_TDC_TDCF, /* u32 */
+
+ /* add new constants above here */
+ __IFLA_CAN_TDC,
+ IFLA_CAN_TDC_MAX = __IFLA_CAN_TDC - 1
+};
/* u16 termination range: 1..65535 Ohms */
#define CAN_TERMINATION_DISABLED 0
diff --git a/include/uapi/linux/cdrom.h b/include/uapi/linux/cdrom.h
index 6c34f6e2f1f7..804ff8d98f71 100644
--- a/include/uapi/linux/cdrom.h
+++ b/include/uapi/linux/cdrom.h
@@ -147,6 +147,8 @@
#define CDROM_NEXT_WRITABLE 0x5394 /* get next writable block */
#define CDROM_LAST_WRITTEN 0x5395 /* get last block written on disc */
+#define CDROM_TIMED_MEDIA_CHANGE 0x5396 /* get the timestamp of the last media change */
+
/*******************************************************
* CDROM IOCTL structures
*******************************************************/
@@ -295,6 +297,23 @@ struct cdrom_generic_command
};
};
+/* This struct is used by CDROM_TIMED_MEDIA_CHANGE */
+struct cdrom_timed_media_change_info {
+ __s64 last_media_change; /* Timestamp of the last detected media
+ * change in ms. May be set by caller,
+ * updated upon successful return of
+ * ioctl.
+ */
+ __u64 media_flags; /* Flags returned by ioctl to indicate
+ * media status.
+ */
+};
+#define MEDIA_CHANGED_FLAG 0x1 /* Last detected media change was more
+ * recent than last_media_change set by
+ * caller.
+ */
+/* other bits of media_flags available for future use */
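A hedged sketch of the new ioctl: compare against a previously stored timestamp and check the changed flag (error handling elided):

.. code-block:: C

	struct cdrom_timed_media_change_info info = {
		.last_media_change = last_seen_ms,	/* from an earlier call */
	};

	if (ioctl(cdrom_fd, CDROM_TIMED_MEDIA_CHANGE, &info) == 0 &&
	    (info.media_flags & MEDIA_CHANGED_FLAG))
		printf("media changed at %lld ms\n",
		       (long long)info.last_media_change);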
+
/*
* A CD-ROM physical sector size is 2048, 2052, 2056, 2324, 2332, 2336,
* 2340, or 2352 bytes long.
diff --git a/include/uapi/linux/counter.h b/include/uapi/linux/counter.h
new file mode 100644
index 000000000000..d0aa95aeff7b
--- /dev/null
+++ b/include/uapi/linux/counter.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Userspace ABI for Counter character devices
+ * Copyright (C) 2020 William Breathitt Gray
+ */
+#ifndef _UAPI_COUNTER_H_
+#define _UAPI_COUNTER_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/* Component type definitions */
+enum counter_component_type {
+ COUNTER_COMPONENT_NONE,
+ COUNTER_COMPONENT_SIGNAL,
+ COUNTER_COMPONENT_COUNT,
+ COUNTER_COMPONENT_FUNCTION,
+ COUNTER_COMPONENT_SYNAPSE_ACTION,
+ COUNTER_COMPONENT_EXTENSION,
+};
+
+/* Component scope definitions */
+enum counter_scope {
+ COUNTER_SCOPE_DEVICE,
+ COUNTER_SCOPE_SIGNAL,
+ COUNTER_SCOPE_COUNT,
+};
+
+/**
+ * struct counter_component - Counter component identification
+ * @type: component type (one of enum counter_component_type)
+ * @scope: component scope (one of enum counter_scope)
+ * @parent: parent ID (matching the ID suffix of the respective parent sysfs
+ * path as described by the ABI documentation file
+ * Documentation/ABI/testing/sysfs-bus-counter)
+ * @id: component ID (matching the ID provided by the respective *_component_id
+ * sysfs attribute of the desired component)
+ *
+ * For example, if the Count 2 ceiling extension of Counter device 4 is desired,
+ * set type equal to COUNTER_COMPONENT_EXTENSION, scope equal to
+ * COUNTER_SCOPE_COUNT, parent equal to 2, and id equal to the value provided by
+ * the respective /sys/bus/counter/devices/counter4/count2/ceiling_component_id
+ * sysfs attribute.
+ */
+struct counter_component {
+ __u8 type;
+ __u8 scope;
+ __u8 parent;
+ __u8 id;
+};
+
+/* Event type definitions */
+enum counter_event_type {
+ /* Count value increased past ceiling */
+ COUNTER_EVENT_OVERFLOW,
+ /* Count value decreased past floor */
+ COUNTER_EVENT_UNDERFLOW,
+ /* Count value increased past ceiling, or decreased past floor */
+ COUNTER_EVENT_OVERFLOW_UNDERFLOW,
+ /* Count value reached threshold */
+ COUNTER_EVENT_THRESHOLD,
+ /* Index signal detected */
+ COUNTER_EVENT_INDEX,
+};
+
+/**
+ * struct counter_watch - Counter component watch configuration
+ * @component: component to watch when event triggers
+ * @event: event that triggers (one of enum counter_event_type)
+ * @channel: event channel (typically 0 unless the device supports concurrent
+ * events of the same type)
+ */
+struct counter_watch {
+ struct counter_component component;
+ __u8 event;
+ __u8 channel;
+};
+
+/*
+ * Queues a Counter watch for the specified event.
+ *
+ * The queued watches will not be applied until COUNTER_ENABLE_EVENTS_IOCTL is
+ * called.
+ */
+#define COUNTER_ADD_WATCH_IOCTL _IOW(0x3E, 0x00, struct counter_watch)
+/*
+ * Enables monitoring the events specified by the Counter watches that were
+ * queued by COUNTER_ADD_WATCH_IOCTL.
+ *
+ * If events are already enabled, the new set of watches replaces the old one.
+ * Calling this ioctl also has the effect of clearing the queue of watches added
+ * by COUNTER_ADD_WATCH_IOCTL.
+ */
+#define COUNTER_ENABLE_EVENTS_IOCTL _IO(0x3E, 0x01)
+/*
+ * Stops monitoring the previously enabled events.
+ */
+#define COUNTER_DISABLE_EVENTS_IOCTL _IO(0x3E, 0x02)
+
+/**
+ * struct counter_event - Counter event data
+ * @timestamp: best estimate of time of event occurrence, in nanoseconds
+ * @value: component value
+ * @watch: component watch configuration
+ * @status: return status (system error number)
+ */
+struct counter_event {
+ __aligned_u64 timestamp;
+ __aligned_u64 value;
+ struct counter_watch watch;
+ __u8 status;
+};
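Putting the pieces together, a hedged sketch of the character-device flow: queue a watch on Count 0 for overflow events, enable events, then read back one counter_event record (the device node is assumed; error handling is elided):

.. code-block:: C

	struct counter_watch watch = {
		.component = {
			.type  = COUNTER_COMPONENT_COUNT,
			.scope = COUNTER_SCOPE_COUNT,
			.parent = 0,			/* Count 0 */
		},
		.event = COUNTER_EVENT_OVERFLOW,
	};
	struct counter_event event;

	int fd = open("/dev/counter0", O_RDWR);		/* assumed device node */
	ioctl(fd, COUNTER_ADD_WATCH_IOCTL, &watch);
	ioctl(fd, COUNTER_ENABLE_EVENTS_IOCTL);
	read(fd, &event, sizeof(event));		/* blocks until the event */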
+
+/* Count direction values */
+enum counter_count_direction {
+ COUNTER_COUNT_DIRECTION_FORWARD,
+ COUNTER_COUNT_DIRECTION_BACKWARD,
+};
+
+/* Count mode values */
+enum counter_count_mode {
+ COUNTER_COUNT_MODE_NORMAL,
+ COUNTER_COUNT_MODE_RANGE_LIMIT,
+ COUNTER_COUNT_MODE_NON_RECYCLE,
+ COUNTER_COUNT_MODE_MODULO_N,
+};
+
+/* Count function values */
+enum counter_function {
+ COUNTER_FUNCTION_INCREASE,
+ COUNTER_FUNCTION_DECREASE,
+ COUNTER_FUNCTION_PULSE_DIRECTION,
+ COUNTER_FUNCTION_QUADRATURE_X1_A,
+ COUNTER_FUNCTION_QUADRATURE_X1_B,
+ COUNTER_FUNCTION_QUADRATURE_X2_A,
+ COUNTER_FUNCTION_QUADRATURE_X2_B,
+ COUNTER_FUNCTION_QUADRATURE_X4,
+};
+
+/* Signal values */
+enum counter_signal_level {
+ COUNTER_SIGNAL_LEVEL_LOW,
+ COUNTER_SIGNAL_LEVEL_HIGH,
+};
+
+/* Action mode values */
+enum counter_synapse_action {
+ COUNTER_SYNAPSE_ACTION_NONE,
+ COUNTER_SYNAPSE_ACTION_RISING_EDGE,
+ COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
+ COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
+};
+
+#endif /* _UAPI_COUNTER_H_ */
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 32f53a0069d6..b897b80770f6 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -551,6 +551,8 @@ enum devlink_attr {
DEVLINK_ATTR_RATE_NODE_NAME, /* string */
DEVLINK_ATTR_RATE_PARENT_NODE_NAME, /* string */
+ DEVLINK_ATTR_REGION_MAX_SNAPSHOTS, /* u32 */
+
/* add new attributes above here, update the policy in devlink.c */
__DEVLINK_ATTR_MAX,
diff --git a/include/uapi/linux/dlm_device.h b/include/uapi/linux/dlm_device.h
index f880d2831160..e83954c69fff 100644
--- a/include/uapi/linux/dlm_device.h
+++ b/include/uapi/linux/dlm_device.h
@@ -45,13 +45,13 @@ struct dlm_lock_params {
void __user *bastaddr;
struct dlm_lksb __user *lksb;
char lvb[DLM_USER_LVB_LEN];
- char name[0];
+ char name[];
};
struct dlm_lspace_params {
__u32 flags;
__u32 minor;
- char name[0];
+ char name[];
};
struct dlm_purge_params {
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index b6db6590baf0..a2223b685451 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -603,6 +603,7 @@ enum ethtool_link_ext_state {
ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE,
ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED,
ETHTOOL_LINK_EXT_STATE_OVERHEAT,
+ ETHTOOL_LINK_EXT_STATE_MODULE,
};
/* More information in addition to ETHTOOL_LINK_EXT_STATE_AUTONEG. */
@@ -649,6 +650,11 @@ enum ethtool_link_ext_substate_cable_issue {
ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE,
};
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_MODULE. */
+enum ethtool_link_ext_substate_module {
+ ETHTOOL_LINK_EXT_SUBSTATE_MODULE_CMIS_NOT_READY = 1,
+};
+
#define ETH_GSTRING_LEN 32
/**
@@ -707,6 +713,29 @@ enum ethtool_stringset {
};
/**
+ * enum ethtool_module_power_mode_policy - plug-in module power mode policy
+ * @ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH: Module is always in high power mode.
+ * @ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO: Module is transitioned by the host
+ * to high power mode when the first port using it is put administratively
+ * up and to low power mode when the last port using it is put
+ * administratively down.
+ */
+enum ethtool_module_power_mode_policy {
+ ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH = 1,
+ ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO,
+};
+
+/**
+ * enum ethtool_module_power_mode - plug-in module power mode
+ * @ETHTOOL_MODULE_POWER_MODE_LOW: Module is in low power mode.
+ * @ETHTOOL_MODULE_POWER_MODE_HIGH: Module is in high power mode.
+ */
+enum ethtool_module_power_mode {
+ ETHTOOL_MODULE_POWER_MODE_LOW = 1,
+ ETHTOOL_MODULE_POWER_MODE_HIGH,
+};
+
+/**
* struct ethtool_gstrings - string set for data tagging
* @cmd: Command number = %ETHTOOL_GSTRINGS
* @string_set: String set ID; one of &enum ethtool_stringset
diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
index 5545f1ca9237..999777d32dcf 100644
--- a/include/uapi/linux/ethtool_netlink.h
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -47,6 +47,8 @@ enum {
ETHTOOL_MSG_MODULE_EEPROM_GET,
ETHTOOL_MSG_STATS_GET,
ETHTOOL_MSG_PHC_VCLOCKS_GET,
+ ETHTOOL_MSG_MODULE_GET,
+ ETHTOOL_MSG_MODULE_SET,
/* add new constants above here */
__ETHTOOL_MSG_USER_CNT,
@@ -90,6 +92,8 @@ enum {
ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY,
ETHTOOL_MSG_STATS_GET_REPLY,
ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY,
+ ETHTOOL_MSG_MODULE_GET_REPLY,
+ ETHTOOL_MSG_MODULE_NTF,
/* add new constants above here */
__ETHTOOL_MSG_KERNEL_CNT,
@@ -407,7 +411,9 @@ enum {
ETHTOOL_A_PAUSE_STAT_TX_FRAMES,
ETHTOOL_A_PAUSE_STAT_RX_FRAMES,
- /* add new constants above here */
+ /* add new constants above here
+ * adjust ETHTOOL_PAUSE_STAT_CNT if adding non-stats!
+ */
__ETHTOOL_A_PAUSE_STAT_CNT,
ETHTOOL_A_PAUSE_STAT_MAX = (__ETHTOOL_A_PAUSE_STAT_CNT - 1)
};
@@ -833,6 +839,19 @@ enum {
ETHTOOL_A_STATS_RMON_MAX = (__ETHTOOL_A_STATS_RMON_CNT - 1)
};
+/* MODULE */
+
+enum {
+ ETHTOOL_A_MODULE_UNSPEC,
+ ETHTOOL_A_MODULE_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_MODULE_POWER_MODE_POLICY, /* u8 */
+ ETHTOOL_A_MODULE_POWER_MODE, /* u8 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_MODULE_CNT,
+ ETHTOOL_A_MODULE_MAX = (__ETHTOOL_A_MODULE_CNT - 1)
+};
+
/* generic netlink info */
#define ETHTOOL_GENL_NAME "ethtool"
#define ETHTOOL_GENL_VERSION 1
diff --git a/include/uapi/linux/fanotify.h b/include/uapi/linux/fanotify.h
index 64553df9d735..bd1932c2074d 100644
--- a/include/uapi/linux/fanotify.h
+++ b/include/uapi/linux/fanotify.h
@@ -20,6 +20,7 @@
#define FAN_OPEN_EXEC 0x00001000 /* File was opened for exec */
#define FAN_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
+#define FAN_FS_ERROR 0x00008000 /* Filesystem error */
#define FAN_OPEN_PERM 0x00010000 /* File open in perm check */
#define FAN_ACCESS_PERM 0x00020000 /* File accessed in perm check */
@@ -125,6 +126,7 @@ struct fanotify_event_metadata {
#define FAN_EVENT_INFO_TYPE_DFID_NAME 2
#define FAN_EVENT_INFO_TYPE_DFID 3
#define FAN_EVENT_INFO_TYPE_PIDFD 4
+#define FAN_EVENT_INFO_TYPE_ERROR 5
/* Variable length info record following event metadata */
struct fanotify_event_info_header {
@@ -159,6 +161,12 @@ struct fanotify_event_info_pidfd {
__s32 pidfd;
};
+struct fanotify_event_info_error {
+ struct fanotify_event_info_header hdr;
+ __s32 error;
+ __u32 error_count;
+};
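A hedged sketch of consuming FAN_FS_ERROR: mark a whole filesystem, then look for the error info record after the event metadata (the mount point is hypothetical, the record layout is simplified, and error handling is elided):

.. code-block:: C

	int fan_fd = fanotify_init(FAN_CLASS_NOTIF | FAN_REPORT_FID, 0);
	fanotify_mark(fan_fd, FAN_MARK_ADD | FAN_MARK_FILESYSTEM,
		      FAN_FS_ERROR, AT_FDCWD, "/mnt");	/* hypothetical path */

	char buf[4096];
	ssize_t len = read(fan_fd, buf, sizeof(buf));
	struct fanotify_event_metadata *md = (void *)buf;
	struct fanotify_event_info_error *err = (void *)(md + 1);

	/* Simplified: assumes the error record is the first info record. */
	if (len > 0 && err->hdr.info_type == FAN_EVENT_INFO_TYPE_ERROR)
		printf("fs error %d seen %u times\n", err->error, err->error_count);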
+
struct fanotify_response {
__s32 fd;
__u32 response;
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 36ed092227fa..a1dc3ee1d17c 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -184,6 +184,9 @@
*
* 7.34
* - add FUSE_SYNCFS
+ *
+ * 7.35
+ * - add FOPEN_NOFLUSH
*/
#ifndef _LINUX_FUSE_H
@@ -219,7 +222,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 34
+#define FUSE_KERNEL_MINOR_VERSION 35
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -290,12 +293,14 @@ struct fuse_file_lock {
* FOPEN_NONSEEKABLE: the file is not seekable
* FOPEN_CACHE_DIR: allow caching this directory
* FOPEN_STREAM: the file is stream-like (no file position at all)
+ * FOPEN_NOFLUSH: don't flush data cache on close (unless FUSE_WRITEBACK_CACHE)
*/
#define FOPEN_DIRECT_IO (1 << 0)
#define FOPEN_KEEP_CACHE (1 << 1)
#define FOPEN_NONSEEKABLE (1 << 2)
#define FOPEN_CACHE_DIR (1 << 3)
#define FOPEN_STREAM (1 << 4)
+#define FOPEN_NOFLUSH (1 << 5)
/**
* INIT request/reply flags
diff --git a/include/uapi/linux/futex.h b/include/uapi/linux/futex.h
index 235e5b2facaa..71a5df8d2689 100644
--- a/include/uapi/linux/futex.h
+++ b/include/uapi/linux/futex.h
@@ -44,6 +44,31 @@
FUTEX_PRIVATE_FLAG)
/*
+ * Flags to specify the bit length of the futex word for futex2 syscalls.
+ * Currently, only 32 is supported.
+ */
+#define FUTEX_32 2
+
+/*
+ * Max numbers of elements in a futex_waitv array
+ */
+#define FUTEX_WAITV_MAX 128
+
+/**
+ * struct futex_waitv - A waiter for vectorized wait
+ * @val: Expected value at uaddr
+ * @uaddr: User address to wait on
+ * @flags: Flags for this waiter
+ * @__reserved: Reserved member to preserve data alignment. Should be 0.
+ */
+struct futex_waitv {
+ __u64 val;
+ __u64 uaddr;
+ __u32 flags;
+ __u32 __reserved;
+};
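A hedged sketch of the corresponding futex_waitv() syscall (assuming no libc wrapper, hence raw syscall(2); the argument order shown here, waiters, count, flags, timeout, clockid, is an assumption of this example):

.. code-block:: C

	static __u32 word_a, word_b;

	struct futex_waitv waiters[2] = {
		{ .val = 0, .uaddr = (uintptr_t)&word_a, .flags = FUTEX_32 },
		{ .val = 0, .uaddr = (uintptr_t)&word_b, .flags = FUTEX_32 },
	};

	/* Sleeps until either word leaves the expected value 0 and is woken. */
	long ret = syscall(__NR_futex_waitv, waiters, 2, 0,
			   NULL /* no timeout */, CLOCK_MONOTONIC);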
+
+/*
* Support for robust futexes: the kernel cleans up held futexes at
* thread exit time.
*/
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 5f589c7a8382..5da4ee234e0b 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -86,6 +86,7 @@
* over Ethernet
*/
#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
+#define ETH_P_REALTEK 0x8899 /* Multiple proprietary protocols */
#define ETH_P_AOE 0x88A2 /* ATA over Ethernet */
#define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */
#define ETH_P_802_EX1 0x88B5 /* 802.1 Local Experimental 1. */
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index b270a07b285e..c45b5e9a9387 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -158,6 +158,7 @@ enum {
#define IORING_TIMEOUT_BOOTTIME (1U << 2)
#define IORING_TIMEOUT_REALTIME (1U << 3)
#define IORING_LINK_TIMEOUT_UPDATE (1U << 4)
+#define IORING_TIMEOUT_ETIME_SUCCESS (1U << 5)
#define IORING_TIMEOUT_CLOCK_MASK (IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME)
#define IORING_TIMEOUT_UPDATE_MASK (IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE)
/*
diff --git a/include/uapi/linux/ioam6_iptunnel.h b/include/uapi/linux/ioam6_iptunnel.h
index bae14636a8c8..829ffdfcacca 100644
--- a/include/uapi/linux/ioam6_iptunnel.h
+++ b/include/uapi/linux/ioam6_iptunnel.h
@@ -9,9 +9,38 @@
#ifndef _UAPI_LINUX_IOAM6_IPTUNNEL_H
#define _UAPI_LINUX_IOAM6_IPTUNNEL_H
+/* Encap modes:
+ * - inline: direct insertion
+ * - encap: ip6ip6 encapsulation
+ * - auto: inline for local packets, encap for in-transit packets
+ */
+enum {
+ __IOAM6_IPTUNNEL_MODE_MIN,
+
+ IOAM6_IPTUNNEL_MODE_INLINE,
+ IOAM6_IPTUNNEL_MODE_ENCAP,
+ IOAM6_IPTUNNEL_MODE_AUTO,
+
+ __IOAM6_IPTUNNEL_MODE_MAX,
+};
+
+#define IOAM6_IPTUNNEL_MODE_MIN (__IOAM6_IPTUNNEL_MODE_MIN + 1)
+#define IOAM6_IPTUNNEL_MODE_MAX (__IOAM6_IPTUNNEL_MODE_MAX - 1)
+
enum {
IOAM6_IPTUNNEL_UNSPEC,
+
+ /* Encap mode */
+ IOAM6_IPTUNNEL_MODE, /* u8 */
+
+ /* Tunnel dst address.
+	 * For the encap and auto modes.
+ */
+ IOAM6_IPTUNNEL_DST, /* struct in6_addr */
+
+ /* IOAM Trace Header */
IOAM6_IPTUNNEL_TRACE, /* struct ioam6_trace_hdr */
+
__IOAM6_IPTUNNEL_MAX,
};
diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
index e42d13b55cf3..e00bbb9c47bb 100644
--- a/include/uapi/linux/ip.h
+++ b/include/uapi/linux/ip.h
@@ -169,6 +169,7 @@ enum
IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST,
IPV4_DEVCONF_DROP_GRATUITOUS_ARP,
IPV4_DEVCONF_BC_FORWARDING,
+ IPV4_DEVCONF_ARP_EVICT_NOCARRIER,
__IPV4_DEVCONF_MAX
};
diff --git a/include/uapi/linux/ipmi.h b/include/uapi/linux/ipmi.h
index 32d148309b16..966c3070959b 100644
--- a/include/uapi/linux/ipmi.h
+++ b/include/uapi/linux/ipmi.h
@@ -81,6 +81,20 @@ struct ipmi_ipmb_addr {
};
/*
+ * Used for messages received directly from an IPMB that have not gone
+ * through an MC.  This is for systems that sit right on an IPMB so
+ * they can receive commands and respond to them.
+ */
+#define IPMI_IPMB_DIRECT_ADDR_TYPE 0x81
+struct ipmi_ipmb_direct_addr {
+ int addr_type;
+ short channel;
+ unsigned char slave_addr;
+ unsigned char rs_lun;
+ unsigned char rq_lun;
+};
+
+/*
* A LAN Address. This is an address to/from a LAN interface bridged
* by the BMC, not an address actually out on the LAN.
*
@@ -158,7 +172,7 @@ struct kernel_ipmi_msg {
* is used for the receive in-kernel interface and in the receive
* IOCTL.
*
- * The "IPMI_RESPONSE_RESPNOSE_TYPE" is a little strange sounding, but
+ * The "IPMI_RESPONSE_RESPONSE_TYPE" is a little strange sounding, but
* it allows you to get the message results when you send a response
* message.
*/
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index b243a53fa985..d4178dace0bf 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -193,6 +193,7 @@ enum {
DEVCONF_IOAM6_ENABLED,
DEVCONF_IOAM6_ID,
DEVCONF_IOAM6_ID_WIDE,
+ DEVCONF_NDISC_EVICT_NOCARRIER,
DEVCONF_MAX
};
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index a067410ebea5..1daa45268de2 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -269,6 +269,7 @@ struct kvm_xen_exit {
#define KVM_EXIT_AP_RESET_HOLD 32
#define KVM_EXIT_X86_BUS_LOCK 33
#define KVM_EXIT_XEN 34
+#define KVM_EXIT_RISCV_SBI 35
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -397,13 +398,23 @@ struct kvm_run {
* "ndata" is correct, that new fields are enumerated in "flags",
* and that each flag enumerates fields that are 64-bit aligned
* and sized (so that ndata+internal.data[] is valid/accurate).
+ *
+ * Space beyond the defined fields may be used to store arbitrary
+ * debug information relating to the emulation failure. It is
+ * accounted for in "ndata" but the format is unspecified and is
+ * not represented in "flags". Any such information is *not* ABI!
*/
struct {
__u32 suberror;
__u32 ndata;
__u64 flags;
- __u8 insn_size;
- __u8 insn_bytes[15];
+ union {
+ struct {
+ __u8 insn_size;
+ __u8 insn_bytes[15];
+ };
+ };
+ /* Arbitrary debug data may follow. */
} emulation_failure;
/* KVM_EXIT_OSI */
struct {
@@ -469,6 +480,13 @@ struct kvm_run {
} msr;
/* KVM_EXIT_XEN */
struct kvm_xen_exit xen;
+ /* KVM_EXIT_RISCV_SBI */
+ struct {
+ unsigned long extension_id;
+ unsigned long function_id;
+ unsigned long args[6];
+ unsigned long ret[2];
+ } riscv_sbi;
/* Fix the size of the union. */
char padding[256];
};
@@ -1112,6 +1130,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_BINARY_STATS_FD 203
#define KVM_CAP_EXIT_ON_EMULATION_FAILURE 204
#define KVM_CAP_ARM_MTE 205
+#define KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM 206
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1223,11 +1242,16 @@ struct kvm_irqfd {
/* Do not use 1, KVM_CHECK_EXTENSION returned it before we had flags. */
#define KVM_CLOCK_TSC_STABLE 2
+#define KVM_CLOCK_REALTIME (1 << 2)
+#define KVM_CLOCK_HOST_TSC (1 << 3)
struct kvm_clock_data {
__u64 clock;
__u32 flags;
- __u32 pad[9];
+ __u32 pad0;
+ __u64 realtime;
+ __u64 host_tsc;
+ __u32 pad[4];
};
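A hedged sketch of reading the extended clock data and checking the new flags (vm_fd is an existing VM file descriptor; error handling elided):

.. code-block:: C

	struct kvm_clock_data data = { 0 };

	ioctl(vm_fd, KVM_GET_CLOCK, &data);
	if (data.flags & KVM_CLOCK_REALTIME)
		printf("guest clock %llu ns at host realtime %llu ns\n",
		       (unsigned long long)data.clock,
		       (unsigned long long)data.realtime);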
/* For KVM_CAP_SW_TLB */
diff --git a/include/uapi/linux/map_to_14segment.h b/include/uapi/linux/map_to_14segment.h
new file mode 100644
index 000000000000..0346ef76543b
--- /dev/null
+++ b/include/uapi/linux/map_to_14segment.h
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2021 Glider bv
+ *
+ * Based on include/uapi/linux/map_to_7segment.h:
+ *
+ * Copyright (c) 2005 Henk Vergonet <Henk.Vergonet@gmail.com>
+ */
+
+#ifndef MAP_TO_14SEGMENT_H
+#define MAP_TO_14SEGMENT_H
+
+/* This file provides translation primitives and tables for the conversion
+ * of (ASCII) characters to a 14-segments notation.
+ *
+ * The 14 segment's wikipedia notation below is used as standard.
+ * See: https://en.wikipedia.org/wiki/Fourteen-segment_display
+ *
+ * Notation: +---a---+
+ * |\ | /|
+ * f h i j b
+ * | \|/ |
+ * +-g1+-g2+
+ * | /|\ |
+ * e k l m c
+ * |/ | \|
+ * +---d---+
+ *
+ * Usage:
+ *
+ * Register a map variable, and fill it with a character set:
+ * static SEG14_DEFAULT_MAP(map_seg14);
+ *
+ *
+ * Then use for conversion:
+ * seg14 = map_to_seg14(&map_seg14, some_char);
+ * ...
+ *
+ * In device drivers it is recommended, if required, to make the char map
+ * accessible via the sysfs interface using the following scheme:
+ *
+ * static ssize_t map_seg14_show(struct device *dev,
+ * struct device_attribute *attr, char *buf)
+ * {
+ * memcpy(buf, &map_seg14, sizeof(map_seg14));
+ * return sizeof(map_seg14);
+ * }
+ * static ssize_t map_seg14_store(struct device *dev,
+ * struct device_attribute *attr,
+ * const char *buf, size_t cnt)
+ * {
+ * if (cnt != sizeof(map_seg14))
+ * return -EINVAL;
+ * memcpy(&map_seg14, buf, cnt);
+ * return cnt;
+ * }
+ * static DEVICE_ATTR_RW(map_seg14);
+ */
+#include <linux/errno.h>
+#include <linux/types.h>
+
+#include <asm/byteorder.h>
+
+#define BIT_SEG14_A 0
+#define BIT_SEG14_B 1
+#define BIT_SEG14_C 2
+#define BIT_SEG14_D 3
+#define BIT_SEG14_E 4
+#define BIT_SEG14_F 5
+#define BIT_SEG14_G1 6
+#define BIT_SEG14_G2 7
+#define BIT_SEG14_H 8
+#define BIT_SEG14_I 9
+#define BIT_SEG14_J 10
+#define BIT_SEG14_K 11
+#define BIT_SEG14_L 12
+#define BIT_SEG14_M 13
+#define BIT_SEG14_RESERVED1 14
+#define BIT_SEG14_RESERVED2 15
+
+struct seg14_conversion_map {
+ __be16 table[128];
+};
+
+static __inline__ int map_to_seg14(struct seg14_conversion_map *map, int c)
+{
+ if (c < 0 || c >= sizeof(map->table) / sizeof(map->table[0]))
+ return -EINVAL;
+
+ return __be16_to_cpu(map->table[c]);
+}
+
+#define SEG14_CONVERSION_MAP(_name, _map) \
+ struct seg14_conversion_map _name = { .table = { _map } }
+
+/*
+ * It is recommended to use a facility that allows user space to redefine
+ * custom character sets for LCD devices. Please use a sysfs interface
+ * as described above.
+ */
+#define MAP_TO_SEG14_SYSFS_FILE "map_seg14"
+
+/*******************************************************************************
+ * ASCII conversion table
+ ******************************************************************************/
+
+#define _SEG14(sym, a, b, c, d, e, f, g1, g2, h, j, k, l, m, n) \
+ __cpu_to_be16( a << BIT_SEG14_A | b << BIT_SEG14_B | \
+ c << BIT_SEG14_C | d << BIT_SEG14_D | \
+ e << BIT_SEG14_E | f << BIT_SEG14_F | \
+ g1 << BIT_SEG14_G1 | g2 << BIT_SEG14_G2 | \
+ h << BIT_SEG14_H | j << BIT_SEG14_I | \
+ k << BIT_SEG14_J | l << BIT_SEG14_K | \
+ m << BIT_SEG14_L | n << BIT_SEG14_M )
+
+#define _MAP_0_32_ASCII_SEG14_NON_PRINTABLE \
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+
+#define _MAP_33_47_ASCII_SEG14_SYMBOL \
+ _SEG14('!', 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('"', 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0), \
+ _SEG14('#', 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0), \
+ _SEG14('$', 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0), \
+ _SEG14('%', 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0), \
+ _SEG14('&', 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1), \
+ _SEG14('\'',0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0), \
+ _SEG14('(', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1), \
+ _SEG14(')', 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0), \
+ _SEG14('*', 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1), \
+ _SEG14('+', 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0), \
+ _SEG14(',', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0), \
+ _SEG14('-', 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('.', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1), \
+ _SEG14('/', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0),
+
+#define _MAP_48_57_ASCII_SEG14_NUMERIC \
+ _SEG14('0', 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0), \
+ _SEG14('1', 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0), \
+ _SEG14('2', 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('3', 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('4', 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('5', 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1), \
+ _SEG14('6', 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('7', 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0), \
+ _SEG14('8', 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('9', 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0),
+
+#define _MAP_58_64_ASCII_SEG14_SYMBOL \
+ _SEG14(':', 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0), \
+ _SEG14(';', 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0), \
+ _SEG14('<', 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1), \
+ _SEG14('=', 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('>', 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0), \
+ _SEG14('?', 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0), \
+ _SEG14('@', 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0),
+
+#define _MAP_65_90_ASCII_SEG14_ALPHA_UPPER \
+ _SEG14('A', 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('B', 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0), \
+ _SEG14('C', 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('D', 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0), \
+ _SEG14('E', 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('F', 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('G', 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('H', 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('I', 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0), \
+ _SEG14('J', 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('K', 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1), \
+ _SEG14('L', 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('M', 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0), \
+ _SEG14('N', 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1), \
+ _SEG14('O', 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('P', 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('Q', 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1), \
+ _SEG14('R', 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1), \
+ _SEG14('S', 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('T', 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0), \
+ _SEG14('U', 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('V', 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0), \
+ _SEG14('W', 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1), \
+ _SEG14('X', 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1), \
+ _SEG14('Y', 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0), \
+ _SEG14('Z', 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0),
+
+#define _MAP_91_96_ASCII_SEG14_SYMBOL \
+ _SEG14('[', 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('\\',0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1), \
+ _SEG14(']', 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('^', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1), \
+ _SEG14('_', 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('`', 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0),
+
+#define _MAP_97_122_ASCII_SEG14_ALPHA_LOWER \
+ _SEG14('a', 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0), \
+ _SEG14('b', 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1), \
+ _SEG14('c', 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('d', 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0), \
+ _SEG14('e', 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0), \
+ _SEG14('f', 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0), \
+ _SEG14('g', 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0), \
+ _SEG14('h', 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0), \
+ _SEG14('i', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0), \
+ _SEG14('j', 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0), \
+ _SEG14('k', 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1), \
+ _SEG14('l', 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('m', 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0), \
+ _SEG14('n', 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0), \
+ _SEG14('o', 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('p', 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0), \
+ _SEG14('q', 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0), \
+ _SEG14('r', 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('s', 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1), \
+ _SEG14('t', 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('u', 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('v', 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0), \
+ _SEG14('w', 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1), \
+ _SEG14('x', 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1), \
+ _SEG14('y', 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0), \
+ _SEG14('z', 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0),
+
+#define _MAP_123_126_ASCII_SEG14_SYMBOL \
+ _SEG14('{', 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0), \
+ _SEG14('|', 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0), \
+ _SEG14('}', 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1), \
+ _SEG14('~', 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0),
+
+/* Maps */
+#define MAP_ASCII14SEG_ALPHANUM \
+ _MAP_0_32_ASCII_SEG14_NON_PRINTABLE \
+ _MAP_33_47_ASCII_SEG14_SYMBOL \
+ _MAP_48_57_ASCII_SEG14_NUMERIC \
+ _MAP_58_64_ASCII_SEG14_SYMBOL \
+ _MAP_65_90_ASCII_SEG14_ALPHA_UPPER \
+ _MAP_91_96_ASCII_SEG14_SYMBOL \
+ _MAP_97_122_ASCII_SEG14_ALPHA_LOWER \
+ _MAP_123_126_ASCII_SEG14_SYMBOL
+
+#define SEG14_DEFAULT_MAP(_name) \
+ SEG14_CONVERSION_MAP(_name, MAP_ASCII14SEG_ALPHANUM)
+
+#endif /* MAP_TO_14SEGMENT_H */
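
The usage pattern documented at the top of the file reduces to a few lines. A sketch, assuming the header is reachable through the installed uapi include path:

#include <stdio.h>
#include <linux/map_to_14segment.h>

static SEG14_DEFAULT_MAP(map_seg14);    /* full ASCII map defined above */

int main(void)
{
        int seg = map_to_seg14(&map_seg14, 'A');

        if (seg >= 0)   /* bits 0..13 correspond to segments a..m */
                printf("'A' -> 0x%04x\n", seg);
        return 0;
}
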
diff --git a/include/uapi/linux/mctp.h b/include/uapi/linux/mctp.h
index 52b54d13f385..07b0318716fc 100644
--- a/include/uapi/linux/mctp.h
+++ b/include/uapi/linux/mctp.h
@@ -10,6 +10,8 @@
#define __UAPI_MCTP_H
#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/netdevice.h>
typedef __u8 mctp_eid_t;
@@ -18,11 +20,21 @@ struct mctp_addr {
};
struct sockaddr_mctp {
- unsigned short int smctp_family;
- int smctp_network;
+ __kernel_sa_family_t smctp_family;
+ __u16 __smctp_pad0;
+ unsigned int smctp_network;
struct mctp_addr smctp_addr;
__u8 smctp_type;
__u8 smctp_tag;
+ __u8 __smctp_pad1;
+};
+
+struct sockaddr_mctp_ext {
+ struct sockaddr_mctp smctp_base;
+ int smctp_ifindex;
+ __u8 smctp_halen;
+ __u8 __smctp_pad0[3];
+ __u8 smctp_haddr[MAX_ADDR_LEN];
};
#define MCTP_NET_ANY 0x0
@@ -33,4 +45,6 @@ struct sockaddr_mctp {
#define MCTP_TAG_MASK 0x07
#define MCTP_TAG_OWNER 0x08
+#define MCTP_OPT_ADDR_EXT 1
+
#endif /* __UAPI_MCTP_H */
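
A sketch of binding a listener with the reworked sockaddr_mctp. AF_MCTP (45 in recent kernels) comes from the socket headers rather than this diff, and an MCTP network must already be configured:

#include <linux/mctp.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int mctp_listen(unsigned char eid, unsigned char type)
{
        struct sockaddr_mctp addr;
        int sd = socket(AF_MCTP, SOCK_DGRAM, 0);

        if (sd < 0)
                return -1;
        memset(&addr, 0, sizeof(addr));  /* also clears the __smctp_pad fields */
        addr.smctp_family = AF_MCTP;
        addr.smctp_network = MCTP_NET_ANY;
        addr.smctp_addr.s_addr = eid;    /* local EID to listen on */
        addr.smctp_type = type;          /* MCTP message type */
        addr.smctp_tag = MCTP_TAG_OWNER; /* accept incoming requests */
        if (bind(sd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                close(sd);
                return -1;
        }
        return sd;
}
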
diff --git a/include/uapi/linux/mdio.h b/include/uapi/linux/mdio.h
index bdf77dffa5a4..c54e6eae5366 100644
--- a/include/uapi/linux/mdio.h
+++ b/include/uapi/linux/mdio.h
@@ -53,12 +53,14 @@
#define MDIO_AN_EEE_LPABLE 61 /* EEE link partner ability */
#define MDIO_AN_EEE_ADV2 62 /* EEE advertisement 2 */
#define MDIO_AN_EEE_LPABLE2 63 /* EEE link partner ability 2 */
+#define MDIO_AN_CTRL2 64 /* AN THP bypass request control */
/* Media-dependent registers. */
#define MDIO_PMA_10GBT_SWAPPOL 130 /* 10GBASE-T pair swap & polarity */
#define MDIO_PMA_10GBT_TXPWR 131 /* 10GBASE-T TX power control */
#define MDIO_PMA_10GBT_SNR 133 /* 10GBASE-T SNR margin, lane A.
* Lanes B-D are numbered 134-136. */
+#define MDIO_PMA_10GBR_FSRT_CSR 147 /* 10GBASE-R fast retrain status and control */
#define MDIO_PMA_10GBR_FECABLE 170 /* 10GBASE-R FEC ability */
#define MDIO_PCS_10GBX_STAT1 24 /* 10GBASE-X PCS status 1 */
#define MDIO_PCS_10GBRT_STAT1 32 /* 10GBASE-R/-T PCS status 1 */
@@ -239,6 +241,9 @@
#define MDIO_PMA_10GBR_FECABLE_ABLE 0x0001 /* FEC ability */
#define MDIO_PMA_10GBR_FECABLE_ERRABLE 0x0002 /* FEC error indic. ability */
+/* PMA 10GBASE-R Fast Retrain status and control register. */
+#define MDIO_PMA_10GBR_FSRT_ENABLE 0x0001 /* Fast retrain enable */
+
/* PCS 10GBASE-R/-T status register 1. */
#define MDIO_PCS_10GBRT_STAT1_BLKLK 0x0001 /* Block lock attained */
@@ -247,6 +252,7 @@
#define MDIO_PCS_10GBRT_STAT2_BER 0x3f00
/* AN 10GBASE-T control register. */
+#define MDIO_AN_10GBT_CTRL_ADVFSRT2_5G 0x0020 /* Advertise 2.5GBASE-T fast retrain */
#define MDIO_AN_10GBT_CTRL_ADV2_5G 0x0080 /* Advertise 2.5GBASE-T */
#define MDIO_AN_10GBT_CTRL_ADV5G 0x0100 /* Advertise 5GBASE-T */
#define MDIO_AN_10GBT_CTRL_ADV10G 0x1000 /* Advertise 10GBASE-T */
@@ -289,6 +295,9 @@
#define MDIO_EEE_2_5GT 0x0001 /* 2.5GT EEE cap */
#define MDIO_EEE_5GT 0x0002 /* 5GT EEE cap */
+/* AN MultiGBASE-T AN control 2 */
+#define MDIO_AN_THP_BP2_5GT 0x0008 /* 2.5GT THP bypass request */
+
/* 2.5G/5G Extended abilities register. */
#define MDIO_PMA_NG_EXTABLE_2_5GBT 0x0001 /* 2.5GBASET ability */
#define MDIO_PMA_NG_EXTABLE_5GBT 0x0002 /* 5GBASET ability */
diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h
index f66038b9551f..c8cc46f80a16 100644
--- a/include/uapi/linux/mptcp.h
+++ b/include/uapi/linux/mptcp.h
@@ -4,6 +4,13 @@
#include <linux/const.h>
#include <linux/types.h>
+#include <linux/in.h> /* for sockaddr_in */
+#include <linux/in6.h> /* for sockaddr_in6 */
+#include <linux/socket.h> /* for sockaddr_storage and sa_family */
+
+#ifndef __KERNEL__
+#include <sys/socket.h> /* for struct sockaddr */
+#endif
#define MPTCP_SUBFLOW_FLAG_MCAP_REM _BITUL(0)
#define MPTCP_SUBFLOW_FLAG_MCAP_LOC _BITUL(1)
@@ -193,4 +200,32 @@ enum mptcp_event_attr {
#define MPTCP_RST_EBADPERF 5
#define MPTCP_RST_EMIDDLEBOX 6
+struct mptcp_subflow_data {
+ __u32 size_subflow_data; /* size of this structure in userspace */
+ __u32 num_subflows; /* must be 0, set by kernel */
+ __u32 size_kernel; /* must be 0, set by kernel */
+ __u32 size_user; /* size of one element in data[] */
+} __attribute__((aligned(8)));
+
+struct mptcp_subflow_addrs {
+ union {
+ __kernel_sa_family_t sa_family;
+ struct sockaddr sa_local;
+ struct sockaddr_in sin_local;
+ struct sockaddr_in6 sin6_local;
+ struct __kernel_sockaddr_storage ss_local;
+ };
+ union {
+ struct sockaddr sa_remote;
+ struct sockaddr_in sin_remote;
+ struct sockaddr_in6 sin6_remote;
+ struct __kernel_sockaddr_storage ss_remote;
+ };
+};
+
+/* MPTCP socket options */
+#define MPTCP_INFO 1
+#define MPTCP_TCPINFO 2
+#define MPTCP_SUBFLOW_ADDRS 3
+
#endif /* _UAPI_MPTCP_H */
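
MPTCP_SUBFLOW_ADDRS fills a caller-provided buffer that starts with the mptcp_subflow_data header. A sketch, assuming SOL_MPTCP is 284 (its value in the kernel socket headers) when the libc does not define it:

#include <linux/mptcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

#ifndef SOL_MPTCP
#define SOL_MPTCP 284
#endif

static void dump_subflow_count(int fd)
{
        struct {
                struct mptcp_subflow_data d;
                struct mptcp_subflow_addrs a[8]; /* room for up to 8 subflows */
        } buf;
        socklen_t len = sizeof(buf);

        memset(&buf, 0, sizeof(buf));
        buf.d.size_subflow_data = sizeof(buf.d); /* header size we know about */
        buf.d.size_user = sizeof(buf.a[0]);      /* per-element size */
        if (getsockopt(fd, SOL_MPTCP, MPTCP_SUBFLOW_ADDRS, &buf, &len) < 0)
                return;
        printf("%u subflow(s)\n", buf.d.num_subflows); /* set by the kernel */
}
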
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index 00a60695fa53..db05fb55055e 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -31,6 +31,7 @@ enum {
NDA_PROTOCOL, /* Originator of entry */
NDA_NH_ID,
NDA_FDB_EXT_ATTRS,
+ NDA_FLAGS_EXT,
__NDA_MAX
};
@@ -40,14 +41,16 @@ enum {
* Neighbor Cache Entry Flags
*/
-#define NTF_USE 0x01
-#define NTF_SELF 0x02
-#define NTF_MASTER 0x04
-#define NTF_PROXY 0x08 /* == ATF_PUBL */
-#define NTF_EXT_LEARNED 0x10
-#define NTF_OFFLOADED 0x20
-#define NTF_STICKY 0x40
-#define NTF_ROUTER 0x80
+#define NTF_USE (1 << 0)
+#define NTF_SELF (1 << 1)
+#define NTF_MASTER (1 << 2)
+#define NTF_PROXY (1 << 3) /* == ATF_PUBL */
+#define NTF_EXT_LEARNED (1 << 4)
+#define NTF_OFFLOADED (1 << 5)
+#define NTF_STICKY (1 << 6)
+#define NTF_ROUTER (1 << 7)
+/* Extended flags under NDA_FLAGS_EXT: */
+#define NTF_EXT_MANAGED (1 << 0)
/*
* Neighbor Cache Entry States.
@@ -65,12 +68,22 @@ enum {
#define NUD_PERMANENT 0x80
#define NUD_NONE 0x00
-/* NUD_NOARP & NUD_PERMANENT are pseudostates, they never change
- * and make no address resolution or NUD.
- * NUD_PERMANENT also cannot be deleted by garbage collectors.
+/* NUD_NOARP & NUD_PERMANENT are pseudostates, they never change and make no
+ * address resolution or NUD.
+ *
+ * NUD_PERMANENT also cannot be deleted by garbage collectors. This holds true
+ * for dynamic entries with NTF_EXT_LEARNED flag as well. However, upon carrier
+ * down event, NUD_PERMANENT entries are not flushed whereas NTF_EXT_LEARNED
+ * flagged entries explicitly are (which is also consistent with the routing
+ * subsystem).
+ *
* When NTF_EXT_LEARNED is set for a bridge fdb entry the different cache entry
* states don't make sense and thus are ignored. Such entries don't age and
* can roam.
+ *
+ * NTF_EXT_MANAGED flagged neighbor entries are managed by the kernel on behalf
+ * of a user space control plane, and automatically refreshed so that (if
+ * possible) they remain in NUD_REACHABLE state.
*/
struct nda_cacheinfo {
diff --git a/include/uapi/linux/netfilter.h b/include/uapi/linux/netfilter.h
index ef9a44286e23..53411ccc69db 100644
--- a/include/uapi/linux/netfilter.h
+++ b/include/uapi/linux/netfilter.h
@@ -51,6 +51,7 @@ enum nf_inet_hooks {
enum nf_dev_hooks {
NF_NETDEV_INGRESS,
+ NF_NETDEV_EGRESS,
NF_NETDEV_NUMHOOKS
};
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index e94d1fa554cb..466fd3f4447c 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -753,11 +753,13 @@ enum nft_dynset_attributes {
* @NFT_PAYLOAD_LL_HEADER: link layer header
* @NFT_PAYLOAD_NETWORK_HEADER: network header
* @NFT_PAYLOAD_TRANSPORT_HEADER: transport header
+ * @NFT_PAYLOAD_INNER_HEADER: inner header / payload
*/
enum nft_payload_bases {
NFT_PAYLOAD_LL_HEADER,
NFT_PAYLOAD_NETWORK_HEADER,
NFT_PAYLOAD_TRANSPORT_HEADER,
+ NFT_PAYLOAD_INNER_HEADER,
};
/**
@@ -896,7 +898,8 @@ enum nft_meta_keys {
NFT_META_OIF,
NFT_META_IIFNAME,
NFT_META_OIFNAME,
- NFT_META_IIFTYPE,
+ NFT_META_IFTYPE,
+#define NFT_META_IIFTYPE NFT_META_IFTYPE
NFT_META_OIFTYPE,
NFT_META_SKUID,
NFT_META_SKGID,
@@ -923,6 +926,7 @@ enum nft_meta_keys {
NFT_META_TIME_HOUR,
NFT_META_SDIF,
NFT_META_SDIFNAME,
+ __NFT_META_IIFTYPE,
};
/**
diff --git a/include/uapi/linux/nfsd/nfsfh.h b/include/uapi/linux/nfsd/nfsfh.h
deleted file mode 100644
index e29e8accc4f4..000000000000
--- a/include/uapi/linux/nfsd/nfsfh.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * This file describes the layout of the file handles as passed
- * over the wire.
- *
- * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
- */
-
-#ifndef _UAPI_LINUX_NFSD_FH_H
-#define _UAPI_LINUX_NFSD_FH_H
-
-#include <linux/types.h>
-#include <linux/nfs.h>
-#include <linux/nfs2.h>
-#include <linux/nfs3.h>
-#include <linux/nfs4.h>
-
-/*
- * This is the old "dentry style" Linux NFSv2 file handle.
- *
- * The xino and xdev fields are currently used to transport the
- * ino/dev of the exported inode.
- */
-struct nfs_fhbase_old {
- __u32 fb_dcookie; /* dentry cookie - always 0xfeebbaca */
- __u32 fb_ino; /* our inode number */
- __u32 fb_dirino; /* dir inode number, 0 for directories */
- __u32 fb_dev; /* our device */
- __u32 fb_xdev;
- __u32 fb_xino;
- __u32 fb_generation;
-};
-
-/*
- * This is the new flexible, extensible style NFSv2/v3/v4 file handle.
- *
- * The file handle starts with a sequence of four-byte words.
- * The first word contains a version number (1) and three descriptor bytes
- * that tell how the remaining 3 variable length fields should be handled.
- * These three bytes are auth_type, fsid_type and fileid_type.
- *
- * All four-byte values are in host-byte-order.
- *
- * The auth_type field is deprecated and must be set to 0.
- *
- * The fsid_type identifies how the filesystem (or export point) is
- * encoded.
- * Current values:
- * 0 - 4 byte device id (ms-2-bytes major, ls-2-bytes minor), 4byte inode number
- * NOTE: we cannot use the kdev_t device id value, because kdev_t.h
- * says we mustn't. We must break it up and reassemble.
- * 1 - 4 byte user specified identifier
- * 2 - 4 byte major, 4 byte minor, 4 byte inode number - DEPRECATED
- * 3 - 4 byte device id, encoded for user-space, 4 byte inode number
- * 4 - 4 byte inode number and 4 byte uuid
- * 5 - 8 byte uuid
- * 6 - 16 byte uuid
- * 7 - 8 byte inode number and 16 byte uuid
- *
- * The fileid_type identified how the file within the filesystem is encoded.
- * The values for this field are filesystem specific, exccept that
- * filesystems must not use the values '0' or '0xff'. 'See enum fid_type'
- * in include/linux/exportfs.h for currently registered values.
- */
-struct nfs_fhbase_new {
- union {
- struct {
- __u8 fb_version_aux; /* == 1, even => nfs_fhbase_old */
- __u8 fb_auth_type_aux;
- __u8 fb_fsid_type_aux;
- __u8 fb_fileid_type_aux;
- __u32 fb_auth[1];
- /* __u32 fb_fsid[0]; floating */
- /* __u32 fb_fileid[0]; floating */
- };
- struct {
- __u8 fb_version; /* == 1, even => nfs_fhbase_old */
- __u8 fb_auth_type;
- __u8 fb_fsid_type;
- __u8 fb_fileid_type;
- __u32 fb_auth_flex[]; /* flexible-array member */
- };
- };
-};
-
-struct knfsd_fh {
- unsigned int fh_size; /* significant for NFSv3.
- * Points to the current size while building
- * a new file handle
- */
- union {
- struct nfs_fhbase_old fh_old;
- __u32 fh_pad[NFS4_FHSIZE/4];
- struct nfs_fhbase_new fh_new;
- } fh_base;
-};
-
-#define ofh_dcookie fh_base.fh_old.fb_dcookie
-#define ofh_ino fh_base.fh_old.fb_ino
-#define ofh_dirino fh_base.fh_old.fb_dirino
-#define ofh_dev fh_base.fh_old.fb_dev
-#define ofh_xdev fh_base.fh_old.fb_xdev
-#define ofh_xino fh_base.fh_old.fb_xino
-#define ofh_generation fh_base.fh_old.fb_generation
-
-#define fh_version fh_base.fh_new.fb_version
-#define fh_fsid_type fh_base.fh_new.fb_fsid_type
-#define fh_auth_type fh_base.fh_new.fb_auth_type
-#define fh_fileid_type fh_base.fh_new.fb_fileid_type
-#define fh_fsid fh_base.fh_new.fb_auth_flex
-
-/* Do not use, provided for userspace compatiblity. */
-#define fh_auth fh_base.fh_new.fb_auth
-
-#endif /* _UAPI_LINUX_NFSD_FH_H */
diff --git a/include/uapi/linux/nitro_enclaves.h b/include/uapi/linux/nitro_enclaves.h
index b945073fe544..e808f5ba124d 100644
--- a/include/uapi/linux/nitro_enclaves.h
+++ b/include/uapi/linux/nitro_enclaves.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
- * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * Copyright 2020-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*/
#ifndef _UAPI_LINUX_NITRO_ENCLAVES_H_
@@ -60,7 +60,7 @@
*
* Context: Process context.
* Return:
- * * 0 - Logic succesfully completed.
+ * * 0 - Logic successfully completed.
* * -1 - There was a failure in the ioctl logic.
* On failure, errno is set to:
* * EFAULT - copy_from_user() / copy_to_user() failure.
@@ -95,7 +95,7 @@
*
* Context: Process context.
* Return:
- * * 0 - Logic succesfully completed.
+ * * 0 - Logic successfully completed.
* * -1 - There was a failure in the ioctl logic.
* On failure, errno is set to:
* * EFAULT - copy_from_user() / copy_to_user() failure.
@@ -118,7 +118,7 @@
*
* Context: Process context.
* Return:
- * * 0 - Logic succesfully completed.
+ * * 0 - Logic successfully completed.
* * -1 - There was a failure in the ioctl logic.
* On failure, errno is set to:
* * EFAULT - copy_from_user() failure.
@@ -161,7 +161,7 @@
*
* Context: Process context.
* Return:
- * * 0 - Logic succesfully completed.
+ * * 0 - Logic successfully completed.
* * -1 - There was a failure in the ioctl logic.
* On failure, errno is set to:
* * EFAULT - copy_from_user() / copy_to_user() failure.
diff --git a/include/uapi/linux/nl80211-vnd-intel.h b/include/uapi/linux/nl80211-vnd-intel.h
index 0bf177b84fd9..4ed7d0b24512 100644
--- a/include/uapi/linux/nl80211-vnd-intel.h
+++ b/include/uapi/linux/nl80211-vnd-intel.h
@@ -13,6 +13,35 @@
* enum iwl_mvm_vendor_cmd - supported vendor commands
* @IWL_MVM_VENDOR_CMD_GET_CSME_CONN_INFO: reports CSME connection info.
* @IWL_MVM_VENDOR_CMD_HOST_GET_OWNERSHIP: asks for ownership on the device.
+ * This is useful when the CSME firmware owns the device and the kernel
+ * wants to use it. If the CSME firmware has no active connection, the
+ * kernel takes ownership of the device on its own.
+ * When the CSME firmware has an active connection, the user space
+ * involvement is required. The kernel will assert the RFKILL signal with
+ * the "device not owned" reason so that nobody can touch the device. Then
+ * the user space can run the following flow to be able to get connected
+ * to the very same AP the CSME firmware is currently connected to:
+ *
+ * 1) The user space (NetworkManager) boots and sees that the device is
+ * in RFKILL because the host doesn't own the device
+ * 2) The user space asks the kernel what AP the CSME firmware is
+ * connected to (with %IWL_MVM_VENDOR_CMD_GET_CSME_CONN_INFO)
+ * 3) The user space checks if it has a profile that matches the reply
+ * from the CSME firmware
+ * 4) The user space installs a network to the wpa_supplicant with a
+ * specific BSSID and a specific frequency
+ * 5) The user space prevents any type of full scan
+ * 6) The user space asks iwlmei to request ownership on the device (with
+ * this command)
+ * 7) iwlmei requests ownership from the CSME firmware
+ * 8) The CSME firmware grants ownership
+ * 9) iwlmei tells iwlwifi to lift the RFKILL
+ * 10) RFKILL OFF is reported to user space
+ * 11) The host boots the device, loads the firmware, and connects to a
+ *	specific BSSID without scanning, bringing up IP as fast as it can
+ * 12) The host reports to the CSME firmware that there is a connection
+ * 13) The TCP connection is preserved and the host has connectivity
+ *
* @IWL_MVM_VENDOR_CMD_ROAMING_FORBIDDEN_EVENT: notifies if roaming is allowed.
* It contains a &IWL_MVM_VENDOR_ATTR_ROAMING_FORBIDDEN and a
* &IWL_MVM_VENDOR_ATTR_VIF_ADDR attributes.
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index c2efea98e060..61cab81e920d 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -301,6 +301,29 @@
*/
/**
+ * DOC: FILS shared key crypto offload
+ *
+ * This feature is applicable to drivers running in AP mode.
+ *
+ * FILS shared key crypto offload can be advertised by drivers by setting
+ * @NL80211_EXT_FEATURE_FILS_CRYPTO_OFFLOAD flag. The drivers that support
+ * FILS shared key crypto offload should be able to encrypt and decrypt
+ * association frames for FILS shared key authentication as per IEEE 802.11ai.
+ * With this capability, drivers depend on userspace for FILS key derivation.
+ *
+ * After FILS key derivation, userspace shares the FILS AAD details with the
+ * driver and the driver stores the same to use in decryption of association
+ * request and in encryption of association response. The below parameters
+ * should be given to the driver in %NL80211_CMD_SET_FILS_AAD.
+ * %NL80211_ATTR_MAC - STA MAC address, used for storing FILS AAD per STA
+ * %NL80211_ATTR_FILS_KEK - Used for encryption or decryption
+ * %NL80211_ATTR_FILS_NONCES - Used for encryption or decryption
+ * (STA Nonce 16 bytes followed by AP Nonce 16 bytes)
+ *
+ * Once the association is done, the driver cleans the FILS AAD data.
+ */
+
+/**
* enum nl80211_commands - supported nl80211 commands
*
* @NL80211_CMD_UNSPEC: unspecified command to catch errors
@@ -337,7 +360,10 @@
* @NL80211_CMD_DEL_INTERFACE: Virtual interface was deleted, has attributes
* %NL80211_ATTR_IFINDEX and %NL80211_ATTR_WIPHY. Can also be sent from
* userspace to request deletion of a virtual interface, then requires
- * attribute %NL80211_ATTR_IFINDEX.
+ * attribute %NL80211_ATTR_IFINDEX. If multiple BSSID advertisements are
+ * enabled using %NL80211_ATTR_MBSSID_CONFIG, %NL80211_ATTR_MBSSID_ELEMS,
+ * and if this command is used for the transmitting interface, then all
+ * the non-transmitting interfaces are deleted as well.
*
* @NL80211_CMD_GET_KEY: Get sequence counter information for a key specified
* by %NL80211_ATTR_KEY_IDX and/or %NL80211_ATTR_MAC.
@@ -1200,6 +1226,12 @@
* @NL80211_CMD_COLOR_CHANGE_COMPLETED: Notify userland that the color change
* has completed
*
+ * @NL80211_CMD_SET_FILS_AAD: Set FILS AAD data to the driver using -
+ * &NL80211_ATTR_MAC - for STA MAC address
+ * &NL80211_ATTR_FILS_KEK - for KEK
+ * &NL80211_ATTR_FILS_NONCES - for FILS Nonces
+ * (STA Nonce 16 bytes followed by AP Nonce 16 bytes)
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -1440,6 +1472,8 @@ enum nl80211_commands {
NL80211_CMD_COLOR_CHANGE_ABORTED,
NL80211_CMD_COLOR_CHANGE_COMPLETED,
+ NL80211_CMD_SET_FILS_AAD,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -2593,6 +2627,18 @@ enum nl80211_commands {
* @NL80211_ATTR_COLOR_CHANGE_ELEMS: Nested set of attributes containing the IE
* information for the time while performing a color switch.
*
+ * @NL80211_ATTR_MBSSID_CONFIG: Nested attribute for multiple BSSID
+ * advertisements (MBSSID) parameters in AP mode.
+ * Kernel uses this attribute to indicate the driver's support for MBSSID
+ * and enhanced multi-BSSID advertisements (EMA AP) to the userspace.
+ * Userspace should use this attribute to configure per interface MBSSID
+ * parameters.
+ * See &enum nl80211_mbssid_config_attributes for details.
+ *
+ * @NL80211_ATTR_MBSSID_ELEMS: Nested parameter to pass multiple BSSID elements.
+ * Mandatory parameter for the transmitting interface to enable MBSSID.
+ * Optional for the non-transmitting interfaces.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -3096,6 +3142,9 @@ enum nl80211_attrs {
NL80211_ATTR_COLOR_CHANGE_COLOR,
NL80211_ATTR_COLOR_CHANGE_ELEMS,
+ NL80211_ATTR_MBSSID_CONFIG,
+ NL80211_ATTR_MBSSID_ELEMS,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -4929,6 +4978,7 @@ enum nl80211_txrate_gi {
* @NL80211_BAND_60GHZ: around 60 GHz band (58.32 - 69.12 GHz)
* @NL80211_BAND_6GHZ: around 6 GHz band (5.9 - 7.2 GHz)
* @NL80211_BAND_S1GHZ: around 900MHz, supported by S1G PHYs
+ * @NL80211_BAND_LC: light communication band (placeholder)
* @NUM_NL80211_BANDS: number of bands, avoid using this in userspace
* since newer kernel versions may support more bands
*/
@@ -4938,6 +4988,7 @@ enum nl80211_band {
NL80211_BAND_60GHZ,
NL80211_BAND_6GHZ,
NL80211_BAND_S1GHZ,
+ NL80211_BAND_LC,
NUM_NL80211_BANDS,
};
@@ -5995,6 +6046,11 @@ enum nl80211_feature_flags {
* @NL80211_EXT_FEATURE_BSS_COLOR: The driver supports BSS color collision
 * detection and change announcements.
*
+ * @NL80211_EXT_FEATURE_FILS_CRYPTO_OFFLOAD: Driver running in AP mode supports
+ * FILS encryption and decryption for (Re)Association Request and Response
+ * frames. Userspace has to share FILS AAD details to the driver by using
+ * @NL80211_CMD_SET_FILS_AAD.
+ *
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
@@ -6060,6 +6116,7 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_SECURE_RTT,
NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE,
NL80211_EXT_FEATURE_BSS_COLOR,
+ NL80211_EXT_FEATURE_FILS_CRYPTO_OFFLOAD,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -7349,4 +7406,60 @@ enum nl80211_sar_specs_attrs {
NL80211_SAR_ATTR_SPECS_MAX = __NL80211_SAR_ATTR_SPECS_LAST - 1,
};
+/**
+ * enum nl80211_mbssid_config_attributes - multiple BSSID (MBSSID) and enhanced
+ * multi-BSSID advertisements (EMA) in AP mode.
+ * Kernel uses some of these attributes to advertise driver's support for
+ * MBSSID and EMA.
+ * Remaining attributes should be used by the userspace to configure the
+ * features.
+ *
+ * @__NL80211_MBSSID_CONFIG_ATTR_INVALID: Invalid
+ *
+ * @NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES: Used by the kernel to advertise
+ * the maximum number of MBSSID interfaces supported by the driver.
+ * Driver should indicate MBSSID support by setting
+ * wiphy->mbssid_max_interfaces to a value greater than or equal to 2.
+ *
+ * @NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY: Used by the kernel
+ * to advertise the maximum profile periodicity supported by the driver
+ * if EMA is enabled. Driver should indicate EMA support to the userspace
+ * by setting wiphy->ema_max_profile_periodicity to
+ * a non-zero value.
+ *
+ * @NL80211_MBSSID_CONFIG_ATTR_INDEX: Mandatory parameter to pass the index of
+ * this BSS (u8) in the multiple BSSID set.
+ * Value must be set to 0 for the transmitting interface and non-zero for
+ * all non-transmitting interfaces. The userspace will be responsible
+ * for using unique indices for the interfaces.
+ * Range: 0 to wiphy->mbssid_max_interfaces-1.
+ *
+ * @NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX: Mandatory parameter for
+ * a non-transmitted profile which provides the interface index (u32) of
+ * the transmitted profile. The value must match one of the interface
+ * indices advertised by the kernel. Optional if the interface being set up
+ * is the transmitting one; however, if provided, the value must match
+ * the index of that same interface.
+ *
+ * @NL80211_MBSSID_CONFIG_ATTR_EMA: Flag used to enable EMA AP feature.
+ * Setting this flag is permitted only if the driver advertises EMA support
+ * by setting wiphy->ema_max_profile_periodicity to non-zero.
+ *
+ * @__NL80211_MBSSID_CONFIG_ATTR_LAST: Internal
+ * @NL80211_MBSSID_CONFIG_ATTR_MAX: highest attribute
+ */
+enum nl80211_mbssid_config_attributes {
+ __NL80211_MBSSID_CONFIG_ATTR_INVALID,
+
+ NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES,
+ NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY,
+ NL80211_MBSSID_CONFIG_ATTR_INDEX,
+ NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX,
+ NL80211_MBSSID_CONFIG_ATTR_EMA,
+
+ /* keep last */
+ __NL80211_MBSSID_CONFIG_ATTR_LAST,
+ NL80211_MBSSID_CONFIG_ATTR_MAX = __NL80211_MBSSID_CONFIG_ATTR_LAST - 1,
+};
+
#endif /* __LINUX_NL80211_H */
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index e709ae8235e7..ff6ccbc6efe9 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -504,6 +504,12 @@
#define PCI_EXP_DEVCTL_URRE 0x0008 /* Unsupported Request Reporting En. */
#define PCI_EXP_DEVCTL_RELAX_EN 0x0010 /* Enable relaxed ordering */
#define PCI_EXP_DEVCTL_PAYLOAD 0x00e0 /* Max_Payload_Size */
+#define PCI_EXP_DEVCTL_PAYLOAD_128B 0x0000 /* 128 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_256B 0x0020 /* 256 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_512B 0x0040 /* 512 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_1024B 0x0060 /* 1024 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_2048B 0x0080 /* 2048 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_4096B 0x00a0 /* 4096 Bytes */
#define PCI_EXP_DEVCTL_EXT_TAG 0x0100 /* Extended Tag Field Enable */
#define PCI_EXP_DEVCTL_PHANTOM 0x0200 /* Phantom Functions Enable */
#define PCI_EXP_DEVCTL_AUX_PME 0x0400 /* Auxiliary Power PM Enable */
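
The new PAYLOAD_* values spell out the 3-bit encoding held in bits 7:5 of Device Control; decoding the field to bytes is a one-liner, sketched here:

#include <linux/pci_regs.h>

/* 0 -> 128B, 1 -> 256B, ... 5 -> 4096B, matching PCI_EXP_DEVCTL_PAYLOAD_*. */
static unsigned int devctl_payload_bytes(unsigned short devctl)
{
        return 128U << ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
}
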
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index f92880a15645..bd8860eeb291 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -1141,6 +1141,21 @@ enum perf_event_type {
*/
PERF_RECORD_TEXT_POKE = 20,
+ /*
+	 * Data written to the AUX area by hardware due to aux_output may need
+ * to be matched to the event by an architecture-specific hardware ID.
+ * This records the hardware ID, but requires sample_id to provide the
+ * event ID. e.g. Intel PT uses this record to disambiguate PEBS-via-PT
+ * records from multiple events.
+ *
+ * struct {
+ * struct perf_event_header header;
+ * u64 hw_id;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_AUX_OUTPUT_HW_ID = 21,
+
PERF_RECORD_MAX, /* non-ABI */
};
@@ -1210,14 +1225,16 @@ union perf_mem_data_src {
mem_remote:1, /* remote */
mem_snoopx:2, /* snoop mode, ext */
mem_blk:3, /* access blocked */
- mem_rsvd:21;
+ mem_hops:3, /* hop level */
+ mem_rsvd:18;
};
};
#elif defined(__BIG_ENDIAN_BITFIELD)
union perf_mem_data_src {
__u64 val;
struct {
- __u64 mem_rsvd:21,
+ __u64 mem_rsvd:18,
+ mem_hops:3, /* hop level */
mem_blk:3, /* access blocked */
mem_snoopx:2, /* snoop mode, ext */
mem_remote:1, /* remote */
@@ -1241,7 +1258,13 @@ union perf_mem_data_src {
#define PERF_MEM_OP_EXEC 0x10 /* code (execution) */
#define PERF_MEM_OP_SHIFT 0
-/* memory hierarchy (memory level, hit or miss) */
+/*
+ * The PERF_MEM_LVL_* namespace is being deprecated to some extent in
+ * favour of the newer composite PERF_MEM_{LVLNUM_,REMOTE_,SNOOPX_} fields.
+ * This namespace remains supported in order to not break defined ABIs.
+ *
+ * memory hierarchy (memory level, hit or miss)
+ */
#define PERF_MEM_LVL_NA 0x01 /* not available */
#define PERF_MEM_LVL_HIT 0x02 /* hit level */
#define PERF_MEM_LVL_MISS 0x04 /* miss level */
@@ -1307,6 +1330,11 @@ union perf_mem_data_src {
#define PERF_MEM_BLK_ADDR 0x04 /* address conflict */
#define PERF_MEM_BLK_SHIFT 40
+/* hop level */
+#define PERF_MEM_HOPS_0 0x01 /* remote core, same node */
+/* 2-7 available */
+#define PERF_MEM_HOPS_SHIFT 43
+
#define PERF_MEM_S(a, s) \
(((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
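
The existing PERF_MEM_S() helper composes the new field like any other. A sketch that encodes a load with hop level 0 and reads it back through the bitfield view:

#include <linux/perf_event.h>
#include <stdio.h>

int main(void)
{
        union perf_mem_data_src src = {
                .val = PERF_MEM_S(OP, LOAD) | PERF_MEM_S(HOPS, 0),
        };

        if (src.mem_hops == PERF_MEM_HOPS_0)
                printf("hop level: remote core, same node\n");
        return 0;
}
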
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index ec88590b3198..f292b467b27f 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -840,6 +840,8 @@ enum {
TCA_FQ_CODEL_CE_THRESHOLD,
TCA_FQ_CODEL_DROP_BATCH_SIZE,
TCA_FQ_CODEL_MEMORY_LIMIT,
+ TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR,
+ TCA_FQ_CODEL_CE_THRESHOLD_MASK,
__TCA_FQ_CODEL_MAX
};
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 43bd7f713c39..bb73e9a0b24f 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -235,7 +235,7 @@ struct prctl_mm_map {
#define PR_GET_TAGGED_ADDR_CTRL 56
# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
/* MTE tag check fault modes */
-# define PR_MTE_TCF_NONE 0
+# define PR_MTE_TCF_NONE 0UL
# define PR_MTE_TCF_SYNC (1UL << 1)
# define PR_MTE_TCF_ASYNC (1UL << 2)
# define PR_MTE_TCF_MASK (PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC)
@@ -268,5 +268,8 @@ struct prctl_mm_map {
# define PR_SCHED_CORE_SHARE_TO 2 /* push core_sched cookie to pid */
# define PR_SCHED_CORE_SHARE_FROM 3 /* pull core_sched cookie to pid */
# define PR_SCHED_CORE_MAX 4
+# define PR_SCHED_CORE_SCOPE_THREAD 0
+# define PR_SCHED_CORE_SCOPE_THREAD_GROUP 1
+# define PR_SCHED_CORE_SCOPE_PROCESS_GROUP 2
#endif /* _LINUX_PRCTL_H */
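
The new scope values are the fourth argument of the existing PR_SCHED_CORE command (PR_SCHED_CORE and PR_SCHED_CORE_CREATE are already present in this header). A sketch that creates a cookie covering the whole thread group:

#include <linux/prctl.h>
#include <sys/prctl.h>
#include <unistd.h>

static int core_sched_isolate_self(void)
{
        /* All threads of this process get a fresh core-scheduling cookie. */
        return prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, getpid(),
                     PR_SCHED_CORE_SCOPE_THREAD_GROUP, 0);
}
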
diff --git a/include/uapi/linux/rtc.h b/include/uapi/linux/rtc.h
index f950bff75e97..03e5b776e597 100644
--- a/include/uapi/linux/rtc.h
+++ b/include/uapi/linux/rtc.h
@@ -14,6 +14,7 @@
#include <linux/const.h>
#include <linux/ioctl.h>
+#include <linux/types.h>
/*
* The struct used to pass data via the following ioctl. Similar to the
@@ -66,6 +67,17 @@ struct rtc_pll_info {
long pll_clock; /* base PLL frequency */
};
+struct rtc_param {
+ __u64 param;
+ union {
+ __u64 uvalue;
+ __s64 svalue;
+ __u64 ptr;
+ };
+ __u32 index;
+ __u32 __pad;
+};
+
/*
* ioctl calls that are permitted to the /dev/rtc interface, if
* any of the RTC drivers are enabled.
@@ -95,6 +107,9 @@ struct rtc_pll_info {
#define RTC_PLL_GET _IOR('p', 0x11, struct rtc_pll_info) /* Get PLL correction */
#define RTC_PLL_SET _IOW('p', 0x12, struct rtc_pll_info) /* Set PLL correction */
+#define RTC_PARAM_GET _IOW('p', 0x13, struct rtc_param) /* Get parameter */
+#define RTC_PARAM_SET _IOW('p', 0x14, struct rtc_param) /* Set parameter */
+
#define RTC_VL_DATA_INVALID _BITUL(0) /* Voltage too low, RTC data is invalid */
#define RTC_VL_BACKUP_LOW _BITUL(1) /* Backup voltage is low */
#define RTC_VL_BACKUP_EMPTY _BITUL(2) /* Backup empty or not present */
@@ -114,7 +129,21 @@ struct rtc_pll_info {
#define RTC_FEATURE_ALARM 0
#define RTC_FEATURE_ALARM_RES_MINUTE 1
#define RTC_FEATURE_NEED_WEEK_DAY 2
-#define RTC_FEATURE_CNT 3
+#define RTC_FEATURE_ALARM_RES_2S 3
+#define RTC_FEATURE_UPDATE_INTERRUPT 4
+#define RTC_FEATURE_CORRECTION 5
+#define RTC_FEATURE_BACKUP_SWITCH_MODE 6
+#define RTC_FEATURE_CNT 7
+
+/* parameter list */
+#define RTC_PARAM_FEATURES 0
+#define RTC_PARAM_CORRECTION 1
+#define RTC_PARAM_BACKUP_SWITCH_MODE 2
+
+#define RTC_BSM_DISABLED 0
+#define RTC_BSM_DIRECT 1
+#define RTC_BSM_LEVEL 2
+#define RTC_BSM_STANDBY 3
#define RTC_MAX_FREQ 8192
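
RTC_PARAM_GET pairs with struct rtc_param above. A sketch querying the feature bitmap, assuming /dev/rtc0 exists and the driver is new enough to implement the parameter ioctls:

#include <fcntl.h>
#include <linux/rtc.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        struct rtc_param param = { .param = RTC_PARAM_FEATURES };
        int fd = open("/dev/rtc0", O_RDONLY);

        if (fd < 0 || ioctl(fd, RTC_PARAM_GET, &param) < 0)
                return 1;
        if (param.uvalue & (1ULL << RTC_FEATURE_BACKUP_SWITCH_MODE))
                printf("backup switch mode is configurable\n");
        close(fd);
        return 0;
}
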
diff --git a/include/uapi/linux/smc.h b/include/uapi/linux/smc.h
index 0f7f87c70baf..20f33b27787f 100644
--- a/include/uapi/linux/smc.h
+++ b/include/uapi/linux/smc.h
@@ -38,6 +38,9 @@ enum { /* SMC PNET Table commands */
#define SMC_GENL_FAMILY_VERSION 1
#define SMC_PCI_ID_STR_LEN 16 /* Max length of pci id string */
+#define SMC_MAX_HOSTNAME_LEN 32 /* Max length of the hostname */
+#define SMC_MAX_UEID 4 /* Max number of user EIDs */
+#define SMC_MAX_EID_LEN 32 /* Max length of an EID */
/* SMC_GENL_FAMILY commands */
enum {
@@ -49,6 +52,13 @@ enum {
SMC_NETLINK_GET_DEV_SMCR,
SMC_NETLINK_GET_STATS,
SMC_NETLINK_GET_FBACK_STATS,
+ SMC_NETLINK_DUMP_UEID,
+ SMC_NETLINK_ADD_UEID,
+ SMC_NETLINK_REMOVE_UEID,
+ SMC_NETLINK_FLUSH_UEID,
+ SMC_NETLINK_DUMP_SEID,
+ SMC_NETLINK_ENABLE_SEID,
+ SMC_NETLINK_DISABLE_SEID,
};
/* SMC_GENL_FAMILY top level attributes */
@@ -74,17 +84,28 @@ enum {
SMC_NLA_SYS_IS_ISM_V2, /* u8 */
SMC_NLA_SYS_LOCAL_HOST, /* string */
SMC_NLA_SYS_SEID, /* string */
+ SMC_NLA_SYS_IS_SMCR_V2, /* u8 */
__SMC_NLA_SYS_MAX,
SMC_NLA_SYS_MAX = __SMC_NLA_SYS_MAX - 1
};
-/* SMC_NLA_LGR_V2 nested attributes */
+/* SMC_NLA_LGR_D_V2_COMMON and SMC_NLA_LGR_R_V2_COMMON nested attributes */
enum {
SMC_NLA_LGR_V2_VER, /* u8 */
SMC_NLA_LGR_V2_REL, /* u8 */
SMC_NLA_LGR_V2_OS, /* u8 */
SMC_NLA_LGR_V2_NEG_EID, /* string */
SMC_NLA_LGR_V2_PEER_HOST, /* string */
+ __SMC_NLA_LGR_V2_MAX,
+ SMC_NLA_LGR_V2_MAX = __SMC_NLA_LGR_V2_MAX - 1
+};
+
+/* SMC_NLA_LGR_R_V2 nested attributes */
+enum {
+ SMC_NLA_LGR_R_V2_UNSPEC,
+ SMC_NLA_LGR_R_V2_DIRECT, /* u8 */
+ __SMC_NLA_LGR_R_V2_MAX,
+ SMC_NLA_LGR_R_V2_MAX = __SMC_NLA_LGR_R_V2_MAX - 1
};
/* SMC_GEN_LGR_SMCR attributes */
@@ -96,6 +117,8 @@ enum {
SMC_NLA_LGR_R_PNETID, /* string */
SMC_NLA_LGR_R_VLAN_ID, /* u8 */
SMC_NLA_LGR_R_CONNS_NUM, /* u32 */
+ SMC_NLA_LGR_R_V2_COMMON, /* nest */
+ SMC_NLA_LGR_R_V2, /* nest */
__SMC_NLA_LGR_R_MAX,
SMC_NLA_LGR_R_MAX = __SMC_NLA_LGR_R_MAX - 1
};
@@ -128,7 +151,7 @@ enum {
SMC_NLA_LGR_D_PNETID, /* string */
SMC_NLA_LGR_D_CHID, /* u16 */
SMC_NLA_LGR_D_PAD, /* flag */
- SMC_NLA_LGR_V2, /* nest */
+ SMC_NLA_LGR_D_V2_COMMON, /* nest */
__SMC_NLA_LGR_D_MAX,
SMC_NLA_LGR_D_MAX = __SMC_NLA_LGR_D_MAX - 1
};
@@ -242,4 +265,21 @@ enum {
__SMC_NLA_FBACK_STATS_MAX,
SMC_NLA_FBACK_STATS_MAX = __SMC_NLA_FBACK_STATS_MAX - 1
};
+
+/* SMC_NETLINK_UEID attributes */
+enum {
+ SMC_NLA_EID_TABLE_UNSPEC,
+ SMC_NLA_EID_TABLE_ENTRY, /* string */
+ __SMC_NLA_EID_TABLE_MAX,
+ SMC_NLA_EID_TABLE_MAX = __SMC_NLA_EID_TABLE_MAX - 1
+};
+
+/* SMC_NETLINK_SEID attributes */
+enum {
+ SMC_NLA_SEID_UNSPEC,
+ SMC_NLA_SEID_ENTRY, /* string */
+ SMC_NLA_SEID_ENABLED, /* u8 */
+ __SMC_NLA_SEID_TABLE_MAX,
+ SMC_NLA_SEID_TABLE_MAX = __SMC_NLA_SEID_TABLE_MAX - 1
+};
#endif /* _UAPI_LINUX_SMC_H */
diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
index ee8220f8dcf5..3021ea25a284 100644
--- a/include/uapi/linux/stddef.h
+++ b/include/uapi/linux/stddef.h
@@ -4,3 +4,40 @@
#ifndef __always_inline
#define __always_inline inline
#endif
+
+/**
+ * __struct_group() - Create a mirrored named and anonymous struct
+ *
+ * @TAG: The tag name for the named sub-struct (usually empty)
+ * @NAME: The identifier name of the mirrored sub-struct
+ * @ATTRS: Any struct attributes (usually empty)
+ * @MEMBERS: The member declarations for the mirrored structs
+ *
+ * Used to create an anonymous union of two structs with identical layout
+ * and size: one anonymous and one named. The former's members can be used
+ * normally without sub-struct naming, and the latter can be used to
+ * reason about the start, end, and size of the group of struct members.
+ * The named struct can also be explicitly tagged for later reuse, as well
+ * as both having struct attributes appended.
+ */
+#define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \
+ union { \
+ struct { MEMBERS } ATTRS; \
+ struct TAG { MEMBERS } ATTRS NAME; \
+ }
+
+/**
+ * __DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union
+ *
+ * @TYPE: The type of each flexible array element
+ * @NAME: The name of the flexible array member
+ *
+ * In order to have a flexible array member in a union or alone in a
+ * struct, it needs to be wrapped in an anonymous struct with at least 1
+ * named member, but that member can be empty.
+ */
+#define __DECLARE_FLEX_ARRAY(TYPE, NAME) \
+ struct { \
+ struct { } __empty_ ## NAME; \
+ TYPE NAME[]; \
+ }
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index 1e05d3caa712..6a3b194c50fe 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -482,6 +482,7 @@ enum
NET_IPV4_CONF_PROMOTE_SECONDARIES=20,
NET_IPV4_CONF_ARP_ACCEPT=21,
NET_IPV4_CONF_ARP_NOTIFY=22,
+ NET_IPV4_CONF_ARP_EVICT_NOCARRIER=23,
};
/* /proc/sys/net/ipv4/netfilter */
diff --git a/include/uapi/linux/tls.h b/include/uapi/linux/tls.h
index 0d54baea1d8d..5f38be0ec0f3 100644
--- a/include/uapi/linux/tls.h
+++ b/include/uapi/linux/tls.h
@@ -84,6 +84,20 @@
#define TLS_CIPHER_CHACHA20_POLY1305_TAG_SIZE 16
#define TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE 8
+#define TLS_CIPHER_SM4_GCM 55
+#define TLS_CIPHER_SM4_GCM_IV_SIZE 8
+#define TLS_CIPHER_SM4_GCM_KEY_SIZE 16
+#define TLS_CIPHER_SM4_GCM_SALT_SIZE 4
+#define TLS_CIPHER_SM4_GCM_TAG_SIZE 16
+#define TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE 8
+
+#define TLS_CIPHER_SM4_CCM 56
+#define TLS_CIPHER_SM4_CCM_IV_SIZE 8
+#define TLS_CIPHER_SM4_CCM_KEY_SIZE 16
+#define TLS_CIPHER_SM4_CCM_SALT_SIZE 4
+#define TLS_CIPHER_SM4_CCM_TAG_SIZE 16
+#define TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE 8
+
#define TLS_SET_RECORD_TYPE 1
#define TLS_GET_RECORD_TYPE 2
@@ -124,6 +138,22 @@ struct tls12_crypto_info_chacha20_poly1305 {
unsigned char rec_seq[TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE];
};
+struct tls12_crypto_info_sm4_gcm {
+ struct tls_crypto_info info;
+ unsigned char iv[TLS_CIPHER_SM4_GCM_IV_SIZE];
+ unsigned char key[TLS_CIPHER_SM4_GCM_KEY_SIZE];
+ unsigned char salt[TLS_CIPHER_SM4_GCM_SALT_SIZE];
+ unsigned char rec_seq[TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE];
+};
+
+struct tls12_crypto_info_sm4_ccm {
+ struct tls_crypto_info info;
+ unsigned char iv[TLS_CIPHER_SM4_CCM_IV_SIZE];
+ unsigned char key[TLS_CIPHER_SM4_CCM_KEY_SIZE];
+ unsigned char salt[TLS_CIPHER_SM4_CCM_SALT_SIZE];
+ unsigned char rec_seq[TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE];
+};
+
enum {
TLS_INFO_UNSPEC,
TLS_INFO_VERSION,
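
Wiring one of the new ciphers into kTLS follows the same shape as the existing AES paths. A sketch for SM4-GCM transmit offload; SOL_TLS falls back to 282 when the libc lacks it, and the key material here stands in for real handshake output:

#include <linux/tls.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

#ifndef SOL_TLS
#define SOL_TLS 282
#endif

static int enable_sm4_gcm_tx(int sock, const unsigned char key[16])
{
        struct tls12_crypto_info_sm4_gcm ci;

        if (setsockopt(sock, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")) < 0)
                return -1;
        memset(&ci, 0, sizeof(ci));
        ci.info.version = TLS_1_2_VERSION;
        ci.info.cipher_type = TLS_CIPHER_SM4_GCM;
        memcpy(ci.key, key, TLS_CIPHER_SM4_GCM_KEY_SIZE);
        /* iv, salt and rec_seq must be taken from the real handshake state. */
        return setsockopt(sock, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}
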
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 5532b5f68493..5fea5feb0412 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -211,6 +211,11 @@ enum v4l2_colorfx {
* We reserve 128 controls for this driver.
*/
#define V4L2_CID_USER_CCS_BASE (V4L2_CID_USER_BASE + 0x10f0)
+/*
+ * The base for Allegro driver controls.
+ * We reserve 16 controls for this driver.
+ */
+#define V4L2_CID_USER_ALLEGRO_BASE (V4L2_CID_USER_BASE + 0x1170)
/* MPEG-class control IDs */
/* The MPEG controls are applicable to all codec controls
@@ -1118,6 +1123,7 @@ enum v4l2_jpeg_chroma_subsampling {
#define V4L2_CID_TEST_PATTERN_BLUE (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 6)
#define V4L2_CID_TEST_PATTERN_GREENB (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 7)
#define V4L2_CID_UNIT_CELL_SIZE (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 8)
+#define V4L2_CID_NOTIFY_GAINS (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 9)
/* Image processing controls */
diff --git a/include/uapi/linux/vdpa.h b/include/uapi/linux/vdpa.h
index 66a41e4ec163..a252f06f9dfd 100644
--- a/include/uapi/linux/vdpa.h
+++ b/include/uapi/linux/vdpa.h
@@ -17,6 +17,7 @@ enum vdpa_command {
VDPA_CMD_DEV_NEW,
VDPA_CMD_DEV_DEL,
VDPA_CMD_DEV_GET, /* can dump */
+ VDPA_CMD_DEV_CONFIG_GET, /* can dump */
};
enum vdpa_attr {
@@ -32,6 +33,12 @@ enum vdpa_attr {
VDPA_ATTR_DEV_VENDOR_ID, /* u32 */
VDPA_ATTR_DEV_MAX_VQS, /* u32 */
VDPA_ATTR_DEV_MAX_VQ_SIZE, /* u16 */
+ VDPA_ATTR_DEV_MIN_VQ_SIZE, /* u16 */
+
+ VDPA_ATTR_DEV_NET_CFG_MACADDR, /* binary */
+ VDPA_ATTR_DEV_NET_STATUS, /* u8 */
+ VDPA_ATTR_DEV_NET_CFG_MAX_VQP, /* u16 */
+ VDPA_ATTR_DEV_NET_CFG_MTU, /* u16 */
/* new attributes must be added above here */
VDPA_ATTR_MAX,
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 9260791b8438..f118fe7a9f58 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -601,15 +601,12 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_NV61 v4l2_fourcc('N', 'V', '6', '1') /* 16 Y/CrCb 4:2:2 */
#define V4L2_PIX_FMT_NV24 v4l2_fourcc('N', 'V', '2', '4') /* 24 Y/CbCr 4:4:4 */
#define V4L2_PIX_FMT_NV42 v4l2_fourcc('N', 'V', '4', '2') /* 24 Y/CrCb 4:4:4 */
-#define V4L2_PIX_FMT_HM12 v4l2_fourcc('H', 'M', '1', '2') /* 8 YUV 4:2:0 16x16 macroblocks */
/* two non contiguous planes - one Y, one Cr + Cb interleaved */
#define V4L2_PIX_FMT_NV12M v4l2_fourcc('N', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 */
#define V4L2_PIX_FMT_NV21M v4l2_fourcc('N', 'M', '2', '1') /* 21 Y/CrCb 4:2:0 */
#define V4L2_PIX_FMT_NV16M v4l2_fourcc('N', 'M', '1', '6') /* 16 Y/CbCr 4:2:2 */
#define V4L2_PIX_FMT_NV61M v4l2_fourcc('N', 'M', '6', '1') /* 16 Y/CrCb 4:2:2 */
-#define V4L2_PIX_FMT_NV12MT v4l2_fourcc('T', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 64x32 macroblocks */
-#define V4L2_PIX_FMT_NV12MT_16X16 v4l2_fourcc('V', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 16x16 macroblocks */
/* three planes - Y Cb, Cr */
#define V4L2_PIX_FMT_YUV410 v4l2_fourcc('Y', 'U', 'V', '9') /* 9 YUV 4:1:0 */
@@ -627,6 +624,15 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_YUV444M v4l2_fourcc('Y', 'M', '2', '4') /* 24 YUV444 planar */
#define V4L2_PIX_FMT_YVU444M v4l2_fourcc('Y', 'M', '4', '2') /* 24 YVU444 planar */
+/* Tiled YUV formats */
+#define V4L2_PIX_FMT_NV12_4L4 v4l2_fourcc('V', 'T', '1', '2') /* 12 Y/CbCr 4:2:0 4x4 tiles */
+#define V4L2_PIX_FMT_NV12_16L16 v4l2_fourcc('H', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 16x16 tiles */
+#define V4L2_PIX_FMT_NV12_32L32 v4l2_fourcc('S', 'T', '1', '2') /* 12 Y/CbCr 4:2:0 32x32 tiles */
+
+/* Tiled YUV formats, non contiguous planes */
+#define V4L2_PIX_FMT_NV12MT v4l2_fourcc('T', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 64x32 tiles */
+#define V4L2_PIX_FMT_NV12MT_16X16 v4l2_fourcc('V', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 16x16 tiles */
+
/* Bayer formats - see http://www.siliconimaging.com/RGB%20Bayer.htm */
#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */
#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */
@@ -733,8 +739,8 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_Y12I v4l2_fourcc('Y', '1', '2', 'I') /* Greyscale 12-bit L/R interleaved */
#define V4L2_PIX_FMT_Z16 v4l2_fourcc('Z', '1', '6', ' ') /* Depth data 16-bit */
#define V4L2_PIX_FMT_MT21C v4l2_fourcc('M', 'T', '2', '1') /* Mediatek compressed block mode */
+#define V4L2_PIX_FMT_MM21 v4l2_fourcc('M', 'M', '2', '1') /* Mediatek 8-bit block mode, two non-contiguous planes */
#define V4L2_PIX_FMT_INZI v4l2_fourcc('I', 'N', 'Z', 'I') /* Intel Planar Greyscale 10-bit and Depth 16-bit */
-#define V4L2_PIX_FMT_SUNXI_TILED_NV12 v4l2_fourcc('S', 'T', '1', '2') /* Sunxi Tiled NV12 Format */
#define V4L2_PIX_FMT_CNF4 v4l2_fourcc('C', 'N', 'F', '4') /* Intel 4-bit packed depth confidence information */
#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H', 'I', '2', '4') /* BTTV 8-bit dithered RGB */
@@ -953,9 +959,12 @@ struct v4l2_requestbuffers {
__u32 type; /* enum v4l2_buf_type */
__u32 memory; /* enum v4l2_memory */
__u32 capabilities;
- __u32 reserved[1];
+ __u8 flags;
+ __u8 reserved[3];
};
+#define V4L2_MEMORY_FLAG_NON_COHERENT (1 << 0)
+
/* capabilities for struct v4l2_requestbuffers and v4l2_create_buffers */
#define V4L2_BUF_CAP_SUPPORTS_MMAP (1 << 0)
#define V4L2_BUF_CAP_SUPPORTS_USERPTR (1 << 1)
@@ -2499,6 +2508,9 @@ struct v4l2_dbg_chip_info {
* @memory: enum v4l2_memory; buffer memory type
* @format: frame format, for which buffers are requested
* @capabilities: capabilities of this buffer type.
+ * @flags: additional buffer management attributes (ignored unless the
+ * queue has V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS capability
+ *		and is configured for MMAP streaming I/O).
* @reserved: future extensions
*/
struct v4l2_create_buffers {
@@ -2507,7 +2519,8 @@ struct v4l2_create_buffers {
__u32 memory;
struct v4l2_format format;
__u32 capabilities;
- __u32 reserved[7];
+ __u32 flags;
+ __u32 reserved[6];
};
/*
@@ -2615,4 +2628,10 @@ struct v4l2_create_buffers {
#define BASE_VIDIOC_PRIVATE 192 /* 192-255 are private */
+/* Deprecated definitions kept for backwards compatibility */
+#ifndef __KERNEL__
+#define V4L2_PIX_FMT_HM12 V4L2_PIX_FMT_NV12_16L16
+#define V4L2_PIX_FMT_SUNXI_TILED_NV12 V4L2_PIX_FMT_NV12_32L32
+#endif
+
#endif /* _UAPI__LINUX_VIDEODEV2_H */
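
The repurposed reserved word gives VIDIOC_REQBUFS a flags byte. A sketch requesting non-coherent MMAP buffers, with the capability check the @flags documentation above calls for:

#include <linux/videodev2.h>
#include <string.h>
#include <sys/ioctl.h>

static int request_noncoherent(int fd, unsigned int count)
{
        struct v4l2_requestbuffers req;

        memset(&req, 0, sizeof(req));
        req.count = count;
        req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        req.memory = V4L2_MEMORY_MMAP;
        req.flags = V4L2_MEMORY_FLAG_NON_COHERENT;      /* the cache hint */
        if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
                return -1;
        /* The hint only took effect if the queue supports cache hints. */
        return (req.capabilities & V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS) ? 0 : 1;
}
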
diff --git a/include/uapi/linux/virtio_gpio.h b/include/uapi/linux/virtio_gpio.h
index 0445f905d8cc..d4b29d9a39dd 100644
--- a/include/uapi/linux/virtio_gpio.h
+++ b/include/uapi/linux/virtio_gpio.h
@@ -5,12 +5,16 @@
#include <linux/types.h>
+/* Virtio GPIO Feature bits */
+#define VIRTIO_GPIO_F_IRQ 0
+
/* Virtio GPIO request types */
#define VIRTIO_GPIO_MSG_GET_NAMES 0x0001
#define VIRTIO_GPIO_MSG_GET_DIRECTION 0x0002
#define VIRTIO_GPIO_MSG_SET_DIRECTION 0x0003
#define VIRTIO_GPIO_MSG_GET_VALUE 0x0004
#define VIRTIO_GPIO_MSG_SET_VALUE 0x0005
+#define VIRTIO_GPIO_MSG_IRQ_TYPE 0x0006
/* Possible values of the status field */
#define VIRTIO_GPIO_STATUS_OK 0x0
@@ -21,11 +25,19 @@
#define VIRTIO_GPIO_DIRECTION_OUT 0x01
#define VIRTIO_GPIO_DIRECTION_IN 0x02
+/* Virtio GPIO IRQ types */
+#define VIRTIO_GPIO_IRQ_TYPE_NONE 0x00
+#define VIRTIO_GPIO_IRQ_TYPE_EDGE_RISING 0x01
+#define VIRTIO_GPIO_IRQ_TYPE_EDGE_FALLING 0x02
+#define VIRTIO_GPIO_IRQ_TYPE_EDGE_BOTH 0x03
+#define VIRTIO_GPIO_IRQ_TYPE_LEVEL_HIGH 0x04
+#define VIRTIO_GPIO_IRQ_TYPE_LEVEL_LOW 0x08
+
struct virtio_gpio_config {
__le16 ngpio;
__u8 padding[2];
__le32 gpio_names_size;
-} __packed;
+};
/* Virtio GPIO Request / Response */
struct virtio_gpio_request {
@@ -44,4 +56,17 @@ struct virtio_gpio_response_get_names {
__u8 value[];
};
+/* Virtio GPIO IRQ Request / Response */
+struct virtio_gpio_irq_request {
+ __le16 gpio;
+};
+
+struct virtio_gpio_irq_response {
+ __u8 status;
+};
+
+/* Possible values of the interrupt status field */
+#define VIRTIO_GPIO_IRQ_STATUS_INVALID 0x0
+#define VIRTIO_GPIO_IRQ_STATUS_VALID 0x1
+
#endif /* _LINUX_VIRTIO_GPIO_H */
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
index 97523a95781d..f556fde07b76 100644
--- a/include/uapi/linux/virtio_gpu.h
+++ b/include/uapi/linux/virtio_gpu.h
@@ -59,6 +59,11 @@
* VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB
*/
#define VIRTIO_GPU_F_RESOURCE_BLOB 3
+/*
+ * VIRTIO_GPU_CMD_CREATE_CONTEXT with
+ * context_init and multiple timelines
+ */
+#define VIRTIO_GPU_F_CONTEXT_INIT 4
enum virtio_gpu_ctrl_type {
VIRTIO_GPU_UNDEFINED = 0,
@@ -122,14 +127,20 @@ enum virtio_gpu_shm_id {
VIRTIO_GPU_SHM_ID_HOST_VISIBLE = 1
};
-#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
+#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
+/*
+ * If the following flag is set, then ring_idx contains the index
+ * of the command ring that needs to used when creating the fence
+ */
+#define VIRTIO_GPU_FLAG_INFO_RING_IDX (1 << 1)
struct virtio_gpu_ctrl_hdr {
__le32 type;
__le32 flags;
__le64 fence_id;
__le32 ctx_id;
- __le32 padding;
+ __u8 ring_idx;
+ __u8 padding[3];
};
/* data passed in the cursor vq */
@@ -269,10 +280,11 @@ struct virtio_gpu_resource_create_3d {
};
/* VIRTIO_GPU_CMD_CTX_CREATE */
+#define VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK 0x000000ff
struct virtio_gpu_ctx_create {
struct virtio_gpu_ctrl_hdr hdr;
__le32 nlen;
- __le32 padding;
+ __le32 context_init;
char debug_name[64];
};
diff --git a/include/uapi/linux/virtio_i2c.h b/include/uapi/linux/virtio_i2c.h
index 7c6a6fc01ad6..acf3b6069136 100644
--- a/include/uapi/linux/virtio_i2c.h
+++ b/include/uapi/linux/virtio_i2c.h
@@ -11,9 +11,15 @@
#include <linux/const.h>
#include <linux/types.h>
+/* Virtio I2C Feature bits */
+#define VIRTIO_I2C_F_ZERO_LENGTH_REQUEST 0
+
/* The bit 0 of the @virtio_i2c_out_hdr.@flags, used to group the requests */
#define VIRTIO_I2C_FLAGS_FAIL_NEXT _BITUL(0)
+/* The bit 1 of the @virtio_i2c_out_hdr.@flags, used to mark a buffer as read */
+#define VIRTIO_I2C_FLAGS_M_RD _BITUL(1)
+
/**
* struct virtio_i2c_out_hdr - the virtio I2C message OUT header
* @addr: the controlled device address
diff --git a/include/uapi/linux/virtio_mem.h b/include/uapi/linux/virtio_mem.h
index 70e01c687d5e..e9122f1d0e0c 100644
--- a/include/uapi/linux/virtio_mem.h
+++ b/include/uapi/linux/virtio_mem.h
@@ -68,9 +68,10 @@
* explicitly triggered (VIRTIO_MEM_REQ_UNPLUG).
*
* There are no guarantees what will happen if unplugged memory is
- * read/written. Such memory should, in general, not be touched. E.g.,
- * even writing might succeed, but the values will simply be discarded at
- * random points in time.
+ * read/written. In general, unplugged memory should not be touched, because
+ * the resulting action is undefined. There is one exception: without
+ * VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE, unplugged memory inside the usable
+ * region can be read, to simplify creation of memory dumps.
*
* It can happen that the device cannot process a request, because it is
* busy. The device driver has to retry later.
@@ -87,6 +88,8 @@
/* node_id is an ACPI PXM and is valid */
#define VIRTIO_MEM_F_ACPI_PXM 0
+/* unplugged memory must not be accessed */
+#define VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE 1
/* --- virtio-mem: guest -> host requests --- */
diff --git a/include/uapi/linux/vm_sockets.h b/include/uapi/linux/vm_sockets.h
index 46918a1852d7..c60ca33eac59 100644
--- a/include/uapi/linux/vm_sockets.h
+++ b/include/uapi/linux/vm_sockets.h
@@ -64,7 +64,7 @@
* timeout for a STREAM socket.
*/
-#define SO_VM_SOCKETS_CONNECT_TIMEOUT 6
+#define SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD 6
/* Option name for using non-blocking send/receive. Use as the option name
* for setsockopt(3) or getsockopt(3) to set or get the non-blocking
@@ -81,6 +81,17 @@
#define SO_VM_SOCKETS_NONBLOCK_TXRX 7
+#define SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW 8
+
+#if !defined(__KERNEL__)
+#if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
+#define SO_VM_SOCKETS_CONNECT_TIMEOUT SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD
+#else
+#define SO_VM_SOCKETS_CONNECT_TIMEOUT \
+ (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD : SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW)
+#endif
+#endif
+
/* The vSocket equivalent of INADDR_ANY. This works for the svm_cid field of
* sockaddr_vm and indicates the context ID of the current endpoint.
*/
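For userspace the net effect is that SO_VM_SOCKETS_CONNECT_TIMEOUT keeps working: the #if block above resolves it to the _OLD or _NEW option name depending on the width of time_t. A minimal, hedged usage sketch:

/* Hypothetical userspace sketch. */
#include <sys/socket.h>
#include <sys/time.h>
#include <linux/vm_sockets.h>

static int set_vsock_connect_timeout(int fd, time_t secs)
{
	struct timeval tv = { .tv_sec = secs, .tv_usec = 0 };

	return setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_CONNECT_TIMEOUT,
			  &tv, sizeof(tv));
}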
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index d13bb8c1b450..00b309590499 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -272,6 +272,16 @@ enum hl_gaudi_pll_index {
HL_GAUDI_PLL_MAX
};
+/**
+ * enum hl_device_status - Device status information.
+ * @HL_DEVICE_STATUS_OPERATIONAL: Device is operational.
+ * @HL_DEVICE_STATUS_IN_RESET: Device is currently undergoing reset.
+ * @HL_DEVICE_STATUS_MALFUNCTION: Device is unusable.
+ * @HL_DEVICE_STATUS_NEEDS_RESET: Device needs to be reset because auto reset was disabled.
+ * @HL_DEVICE_STATUS_IN_DEVICE_CREATION: Device is operational but its creation is still in
+ * progress.
+ * @HL_DEVICE_STATUS_LAST: Last status.
+ */
enum hl_device_status {
HL_DEVICE_STATUS_OPERATIONAL,
HL_DEVICE_STATUS_IN_RESET,
@@ -556,33 +566,30 @@ enum gaudi_dcores {
HL_GAUDI_ES_DCORE
};
+/**
+ * struct hl_info_args - Main structure to retrieve device-related information.
+ * @return_pointer: User space address of the relevant structure related to the HL_INFO_* operation
+ *                  mentioned in @op.
+ * @return_size: Size of the structure used in @return_pointer. Like "size" in "snprintf", it
+ *               limits how many bytes the kernel can write. For the hw_events array, the size
+ *               should be hl_info_hw_ip_info.num_of_events * sizeof(__u32).
+ * @op: Defines which type of information is to be retrieved. Refer to HL_INFO_* for details.
+ * @dcore_id: DCORE id for which the information is relevant (for Gaudi refer to enum gaudi_dcores).
+ * @ctx_id: Context ID of the user. Currently not in use.
+ * @period_ms: Period value, in milliseconds, for the utilization rate, in the range 100 ms - 1000 ms
+ *             at 100 ms resolution. Currently not in use.
+ * @pll_index: Index as defined in the hl_<asic type>_pll_index enumeration.
+ * @pad: Padding to 64 bits.
+ */
struct hl_info_args {
- /* Location of relevant struct in userspace */
__u64 return_pointer;
- /*
- * The size of the return value. Just like "size" in "snprintf",
- * it limits how many bytes the kernel can write
- *
- * For hw_events array, the size should be
- * hl_info_hw_ip_info.num_of_events * sizeof(__u32)
- */
__u32 return_size;
-
- /* HL_INFO_* */
__u32 op;
union {
- /* Dcore id for which the information is relevant.
- * For Gaudi refer to 'enum gaudi_dcores'
- */
__u32 dcore_id;
- /* Context ID - Currently not in use */
__u32 ctx_id;
- /* Period value for utilization rate (100ms - 1000ms, in 100ms
- * resolution.
- */
__u32 period_ms;
- /* PLL frequency retrieval */
__u32 pll_index;
};
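A hedged userspace sketch of the calling convention the kerneldoc above describes, using the existing HL_INFO_HW_IP_INFO op; fd is an open habanalabs device file descriptor.

/* Hypothetical userspace sketch, assuming <misc/habanalabs.h>,
 * <stdint.h> and <sys/ioctl.h> are included.
 */
struct hl_info_hw_ip_info hw_ip = {0};
struct hl_info_args args = {0};

args.return_pointer = (__u64)(uintptr_t)&hw_ip;
args.return_size = sizeof(hw_ip);
args.op = HL_INFO_HW_IP_INFO;

if (ioctl(fd, HL_IOCTL_INFO, &args))
	perror("HL_IOCTL_INFO");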
@@ -890,11 +897,7 @@ struct hl_wait_cs_in {
*/
__u64 addr;
/* Target value for completion comparison */
- __u32 target;
- /* Absolute timeout to wait for interrupt
- * in microseconds
- */
- __u32 interrupt_timeout_us;
+ __u64 target;
};
};
@@ -910,7 +913,12 @@ struct hl_wait_cs_in {
/* Multi CS API info - valid entries in multi-CS array */
__u8 seq_arr_len;
- __u8 pad[7];
+ __u8 pad[3];
+
+ /* Absolute timeout to wait for an interrupt in microseconds.
+ * Relevant only when HL_WAIT_CS_FLAGS_INTERRUPT is set
+ */
+ __u32 interrupt_timeout_us;
};
#define HL_WAIT_CS_STATUS_COMPLETED 0
@@ -952,6 +960,10 @@ union hl_wait_cs_args {
#define HL_MEM_OP_UNMAP 3
/* Opcode to map a hw block */
#define HL_MEM_OP_MAP_BLOCK 4
+/* Opcode to create a DMA-BUF object for an existing device memory allocation
+ * and to export an FD of that DMA-BUF back to the caller
+ */
+#define HL_MEM_OP_EXPORT_DMABUF_FD 5
/* Memory flags */
#define HL_MEM_CONTIGUOUS 0x1
@@ -1023,11 +1035,26 @@ struct hl_mem_in {
/* Virtual address returned from HL_MEM_OP_MAP */
__u64 device_virt_addr;
} unmap;
+
+ /* HL_MEM_OP_EXPORT_DMABUF_FD */
+ struct {
+		/* Handle returned from HL_MEM_OP_ALLOC. In Gaudi,
+		 * which has no MMU for the device memory, the
+ * driver expects a physical address (instead of
+ * a handle) in the device memory space.
+ */
+ __u64 handle;
+ /* Size of memory allocation. Relevant only for GAUDI */
+ __u64 mem_size;
+ } export_dmabuf_fd;
};
/* HL_MEM_OP_* */
__u32 op;
- /* HL_MEM_* flags */
+ /* HL_MEM_* flags.
+ * For the HL_MEM_OP_EXPORT_DMABUF_FD opcode, this field holds the
+ * DMA-BUF file/FD flags.
+ */
__u32 flags;
/* Context ID - Currently not in use */
__u32 ctx_id;
@@ -1064,6 +1091,13 @@ struct hl_mem_out {
__u32 pad;
};
+
+ /* Returned in HL_MEM_OP_EXPORT_DMABUF_FD. Represents the
+ * DMA-BUF object that was created to describe a memory
+		 * allocation in the device's memory space. The FD should be
+		 * passed to the importer driver.
+ */
+ __s32 fd;
};
};
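Putting the new opcode, the flags semantics and the output fd together, a hedged sketch of an export call; handle and size are assumed to come from an earlier HL_MEM_OP_ALLOC.

/* Hypothetical userspace sketch: export device memory as a DMA-BUF. */
union hl_mem_args mem = {0};

mem.in.op = HL_MEM_OP_EXPORT_DMABUF_FD;
mem.in.flags = O_RDWR | O_CLOEXEC;		/* DMA-BUF FD flags */
mem.in.export_dmabuf_fd.handle = handle;	/* from HL_MEM_OP_ALLOC */
mem.in.export_dmabuf_fd.mem_size = size;	/* relevant only for Gaudi */

if (ioctl(fd, HL_IOCTL_MEMORY, &mem) == 0)
	dmabuf_fd = mem.out.fd;			/* pass to the importer */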
diff --git a/include/uapi/rdma/efa-abi.h b/include/uapi/rdma/efa-abi.h
index f89fbb5b1e8d..08035ccf1fff 100644
--- a/include/uapi/rdma/efa-abi.h
+++ b/include/uapi/rdma/efa-abi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
/*
- * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef EFA_ABI_USER_H
@@ -52,11 +52,20 @@ struct efa_ibv_alloc_pd_resp {
__u8 reserved_30[2];
};
+enum {
+ EFA_CREATE_CQ_WITH_COMPLETION_CHANNEL = 1 << 0,
+};
+
struct efa_ibv_create_cq {
__u32 comp_mask;
__u32 cq_entry_size;
__u16 num_sub_cqs;
- __u8 reserved_50[6];
+ __u8 flags;
+ __u8 reserved_58[5];
+};
+
+enum {
+ EFA_CREATE_CQ_RESP_DB_OFF = 1 << 0,
};
struct efa_ibv_create_cq_resp {
@@ -65,7 +74,9 @@ struct efa_ibv_create_cq_resp {
__aligned_u64 q_mmap_key;
__aligned_u64 q_mmap_size;
__u16 cq_idx;
- __u8 reserved_d0[6];
+ __u8 reserved_d0[2];
+ __u32 db_off;
+ __aligned_u64 db_mmap_key;
};
enum {
@@ -106,6 +117,7 @@ struct efa_ibv_create_ah_resp {
enum {
EFA_QUERY_DEVICE_CAPS_RDMA_READ = 1 << 0,
EFA_QUERY_DEVICE_CAPS_RNR_RETRY = 1 << 1,
+ EFA_QUERY_DEVICE_CAPS_CQ_NOTIFICATIONS = 1 << 2,
};
struct efa_ibv_ex_query_device_resp {
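A hedged sketch of how a userspace provider might consume the new capability bit together with the new create-CQ flag; resp is assumed to have been filled by the extended query-device command.

/* Hypothetical fragment. */
struct efa_ibv_create_cq cmd = {0};

if (resp.device_caps & EFA_QUERY_DEVICE_CAPS_CQ_NOTIFICATIONS)
	cmd.flags |= EFA_CREATE_CQ_WITH_COMPLETION_CHANNEL;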
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index 75a1ae2311d8..e50c357367db 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -297,6 +297,8 @@ enum rdma_nldev_command {
RDMA_NLDEV_CMD_RES_SRQ_GET, /* can dump */
+ RDMA_NLDEV_CMD_STAT_GET_STATUS,
+
RDMA_NLDEV_NUM_OPS
};
@@ -549,6 +551,9 @@ enum rdma_nldev_attr {
RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK, /* u8 */
+ RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX, /* u32 */
+ RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC, /* u8 */
+
/*
* Always the end
*/
diff --git a/include/uapi/rdma/rdma_user_rxe.h b/include/uapi/rdma/rdma_user_rxe.h
index e283c2220aba..f09c5c9e3dd5 100644
--- a/include/uapi/rdma/rdma_user_rxe.h
+++ b/include/uapi/rdma/rdma_user_rxe.h
@@ -98,6 +98,10 @@ struct rxe_send_wr {
__u32 remote_qpn;
__u32 remote_qkey;
__u16 pkey_index;
+ __u16 reserved;
+ __u32 ah_num;
+ __u32 pad[4];
+ struct rxe_av av;
} ud;
struct {
__aligned_u64 addr;
@@ -141,14 +145,13 @@ struct rxe_dma_info {
__u32 sge_offset;
__u32 reserved;
union {
- __u8 inline_data[0];
- struct rxe_sge sge[0];
+ __DECLARE_FLEX_ARRAY(__u8, inline_data);
+ __DECLARE_FLEX_ARRAY(struct rxe_sge, sge);
};
};
struct rxe_send_wqe {
struct rxe_send_wr wr;
- struct rxe_av av;
__u32 status;
__u32 state;
__aligned_u64 iova;
@@ -168,6 +171,11 @@ struct rxe_recv_wqe {
struct rxe_dma_info dma;
};
+struct rxe_create_ah_resp {
+ __u32 ah_num;
+ __u32 reserved;
+};
+
struct rxe_create_cq_resp {
struct mminfo mi;
};
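The __DECLARE_FLEX_ARRAY() conversions above (and the identical one in asoc.h below) replace zero-length arrays with true flexible arrays. A flexible array member cannot be the sole member of a struct and cannot sit directly inside a union, so the macro wraps it in an anonymous struct with a zero-sized placeholder; roughly, on current kernels (an assumption of this sketch), __DECLARE_FLEX_ARRAY(__u8, inline_data) expands to:

/* Illustration only; see include/uapi/linux/stddef.h for the real macro. */
struct {
	struct { } __empty_inline_data;	/* zero-sized placeholder member */
	__u8 inline_data[];		/* the actual flexible array */
};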
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
index da61398b1f8f..053949287ce8 100644
--- a/include/uapi/sound/asoc.h
+++ b/include/uapi/sound/asoc.h
@@ -240,8 +240,8 @@ struct snd_soc_tplg_vendor_array {
struct snd_soc_tplg_private {
__le32 size; /* in bytes of private data */
union {
- char data[0];
- struct snd_soc_tplg_vendor_array array[0];
+ __DECLARE_FLEX_ARRAY(char, data);
+ __DECLARE_FLEX_ARRAY(struct snd_soc_tplg_vendor_array, array);
};
} __attribute__((packed));
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 5fbb79e30819..1834f58b8ede 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -202,6 +202,11 @@ typedef int __bitwise snd_pcm_format_t;
#define SNDRV_PCM_FORMAT_S24_BE ((__force snd_pcm_format_t) 7) /* low three bytes */
#define SNDRV_PCM_FORMAT_U24_LE ((__force snd_pcm_format_t) 8) /* low three bytes */
#define SNDRV_PCM_FORMAT_U24_BE ((__force snd_pcm_format_t) 9) /* low three bytes */
+/*
+ * For S32/U32 formats, the 'msbits' hardware parameter is often used to convey how many of the
+ * most significant bits are valid. This covers so-called 'left-justified' or 'right-padded'
+ * samples, which have a smaller width than 32 bits.
+ */
#define SNDRV_PCM_FORMAT_S32_LE ((__force snd_pcm_format_t) 10)
#define SNDRV_PCM_FORMAT_S32_BE ((__force snd_pcm_format_t) 11)
#define SNDRV_PCM_FORMAT_U32_LE ((__force snd_pcm_format_t) 12)
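A small, hedged illustration of the comment above: with msbits == 24, a left-justified S32_LE sample keeps its valid bits at the top of the 32-bit container, and the low byte is padding.

/* Hypothetical sketch: recover the 24 valid bits of a left-justified
 * sample (msbits == 24); the arithmetic right shift preserves sign on
 * the compilers the kernel supports.
 */
static inline int32_t s32_msbits24_to_s24(int32_t raw)
{
	return raw >> 8;
}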
diff --git a/include/xen/arm/hypercall.h b/include/xen/arm/hypercall.h
index b40485e54d80..9d7dd1c65a21 100644
--- a/include/xen/arm/hypercall.h
+++ b/include/xen/arm/hypercall.h
@@ -53,7 +53,6 @@ unsigned long HYPERVISOR_hvm_op(int op, void *arg);
int HYPERVISOR_memory_op(unsigned int cmd, void *arg);
int HYPERVISOR_physdev_op(int cmd, void *arg);
int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args);
-int HYPERVISOR_tmem_op(void *arg);
int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type);
int HYPERVISOR_dm_op(domid_t domid, unsigned int nr_bufs,
struct xen_dm_op_buf *bufs);
@@ -74,18 +73,4 @@ HYPERVISOR_suspend(unsigned long start_info_mfn)
return HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
}
-static inline void
-MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
- unsigned int new_val, unsigned long flags)
-{
- BUG();
-}
-
-static inline void
-MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
- int count, int *success_count, domid_t domid)
-{
- BUG();
-}
-
#endif /* _ASM_ARM_XEN_HYPERCALL_H */
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index 6dbdb0b3fd03..e93d4f0088c5 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -26,9 +26,6 @@ extern struct balloon_stats balloon_stats;
void balloon_set_new_target(unsigned long target);
-int alloc_xenballooned_pages(int nr_pages, struct page **pages);
-void free_xenballooned_pages(int nr_pages, struct page **pages);
-
#ifdef CONFIG_XEN_BALLOON
void xen_balloon_init(void);
#else
diff --git a/include/xen/interface/callback.h b/include/xen/interface/callback.h
index dc3193f4b581..c67822a25ea6 100644
--- a/include/xen/interface/callback.h
+++ b/include/xen/interface/callback.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* callback.h
*
* Register guest OS callbacks with Xen.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2006, Ian Campbell
*/
diff --git a/include/xen/interface/elfnote.h b/include/xen/interface/elfnote.h
index 449bd383cb76..38deb1214613 100644
--- a/include/xen/interface/elfnote.h
+++ b/include/xen/interface/elfnote.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* elfnote.h
*
* Definitions used for the Xen ELF notes.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2006, Ian Campbell, XenSource Ltd.
*/
diff --git a/include/xen/interface/event_channel.h b/include/xen/interface/event_channel.h
index cf80e338fbb0..5f8da466e8a9 100644
--- a/include/xen/interface/event_channel.h
+++ b/include/xen/interface/event_channel.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* event_channel.h
*
diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h
index 5a7bdefa06a8..53f760378e39 100644
--- a/include/xen/interface/features.h
+++ b/include/xen/interface/features.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* features.h
*
diff --git a/include/xen/interface/grant_table.h b/include/xen/interface/grant_table.h
index 7fb7112d667c..3eeabbc7ee09 100644
--- a/include/xen/interface/grant_table.h
+++ b/include/xen/interface/grant_table.h
@@ -1,27 +1,10 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* grant_table.h
*
* Interface for granting foreign access to page frames, and receiving
* page-ownership transfers.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2004, K A Fraser
*/
diff --git a/include/xen/interface/hvm/dm_op.h b/include/xen/interface/hvm/dm_op.h
index ee9e480bc559..08d972f87c7b 100644
--- a/include/xen/interface/hvm/dm_op.h
+++ b/include/xen/interface/hvm/dm_op.h
@@ -1,23 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright (c) 2016, Citrix Systems Inc
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
*/
#ifndef __XEN_PUBLIC_HVM_DM_OP_H__
diff --git a/include/xen/interface/hvm/hvm_op.h b/include/xen/interface/hvm/hvm_op.h
index 25d945ef17de..f3097e79bb03 100644
--- a/include/xen/interface/hvm/hvm_op.h
+++ b/include/xen/interface/hvm/hvm_op.h
@@ -1,22 +1,4 @@
-/*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
+/* SPDX-License-Identifier: MIT */
#ifndef __XEN_PUBLIC_HVM_HVM_OP_H__
#define __XEN_PUBLIC_HVM_HVM_OP_H__
diff --git a/include/xen/interface/hvm/hvm_vcpu.h b/include/xen/interface/hvm/hvm_vcpu.h
index bfc2138e0bf5..cbf93493275c 100644
--- a/include/xen/interface/hvm/hvm_vcpu.h
+++ b/include/xen/interface/hvm/hvm_vcpu.h
@@ -1,22 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2015, Roger Pau Monne <roger.pau@citrix.com>
*/
diff --git a/include/xen/interface/hvm/params.h b/include/xen/interface/hvm/params.h
index 4d61fc58d99d..4e2c94b3c466 100644
--- a/include/xen/interface/hvm/params.h
+++ b/include/xen/interface/hvm/params.h
@@ -1,22 +1,4 @@
-/*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
+/* SPDX-License-Identifier: MIT */
#ifndef __XEN_PUBLIC_HVM_PARAMS_H__
#define __XEN_PUBLIC_HVM_PARAMS_H__
diff --git a/include/xen/interface/hvm/start_info.h b/include/xen/interface/hvm/start_info.h
index 50af9ea2ff1e..e33557c0b4e9 100644
--- a/include/xen/interface/hvm/start_info.h
+++ b/include/xen/interface/hvm/start_info.h
@@ -1,22 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2016, Citrix Systems, Inc.
*/
diff --git a/include/xen/interface/io/9pfs.h b/include/xen/interface/io/9pfs.h
index 5b6c19dae5e2..f1a4c5ad2fd1 100644
--- a/include/xen/interface/io/9pfs.h
+++ b/include/xen/interface/io/9pfs.h
@@ -1,24 +1,7 @@
+/* SPDX-License-Identifier: MIT */
/*
* 9pfs.h -- Xen 9PFS transport
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (C) 2017 Stefano Stabellini <stefano@aporeto.com>
*/
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index 5e40041c7e95..ba1e9f5b630e 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* blkif.h
*
diff --git a/include/xen/interface/io/console.h b/include/xen/interface/io/console.h
index 85ca8b02695a..cf17e89ed861 100644
--- a/include/xen/interface/io/console.h
+++ b/include/xen/interface/io/console.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* console.h
*
diff --git a/include/xen/interface/io/displif.h b/include/xen/interface/io/displif.h
index d43ca0361f86..18417b017869 100644
--- a/include/xen/interface/io/displif.h
+++ b/include/xen/interface/io/displif.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* displif.h
*
* Unified display device I/O interface for Xen guest OSes.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (C) 2016-2017 EPAM Systems Inc.
*
* Authors: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
diff --git a/include/xen/interface/io/fbif.h b/include/xen/interface/io/fbif.h
index 974a51ed9165..60ca808cef97 100644
--- a/include/xen/interface/io/fbif.h
+++ b/include/xen/interface/io/fbif.h
@@ -1,24 +1,7 @@
+/* SPDX-License-Identifier: MIT */
/*
* fbif.h -- Xen virtual frame buffer device
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
* Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
*/
diff --git a/include/xen/interface/io/kbdif.h b/include/xen/interface/io/kbdif.h
index 5c7630d7376e..b8b08aa53325 100644
--- a/include/xen/interface/io/kbdif.h
+++ b/include/xen/interface/io/kbdif.h
@@ -1,24 +1,7 @@
+/* SPDX-License-Identifier: MIT */
/*
* kbdif.h -- Xen virtual keyboard/mouse
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
* Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
*/
diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h
index 2194322c3c7f..cb0c1a25d5d4 100644
--- a/include/xen/interface/io/netif.h
+++ b/include/xen/interface/io/netif.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* xen_netif.h
*
* Unified network-device I/O interface for Xen guest OSes.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2003-2004, Keir Fraser
*/
diff --git a/include/xen/interface/io/pciif.h b/include/xen/interface/io/pciif.h
index d9922ae36eb5..d1a87b62daae 100644
--- a/include/xen/interface/io/pciif.h
+++ b/include/xen/interface/io/pciif.h
@@ -1,24 +1,7 @@
+/* SPDX-License-Identifier: MIT */
/*
* PCI Backend/Frontend Common Data Structures & Macros
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Author: Ryan Wilson <hap9@epoch.ncsc.mil>
*/
#ifndef __XEN_PCI_COMMON_H__
diff --git a/include/xen/interface/io/protocols.h b/include/xen/interface/io/protocols.h
index 6a89dc1bf225..22099bb4079f 100644
--- a/include/xen/interface/io/protocols.h
+++ b/include/xen/interface/io/protocols.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __XEN_PROTOCOLS_H__
#define __XEN_PROTOCOLS_H__
diff --git a/include/xen/interface/io/pvcalls.h b/include/xen/interface/io/pvcalls.h
index ccf97b817e72..b6680fdbe2a8 100644
--- a/include/xen/interface/io/pvcalls.h
+++ b/include/xen/interface/io/pvcalls.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: MIT */
+
#ifndef __XEN_PUBLIC_IO_XEN_PVCALLS_H__
#define __XEN_PUBLIC_IO_XEN_PVCALLS_H__
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
index b39cdbc522ec..2470ec45ebb2 100644
--- a/include/xen/interface/io/ring.h
+++ b/include/xen/interface/io/ring.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* ring.h
*
* Shared producer-consumer ring macros.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Tim Deegan and Andrew Warfield November 2004.
*/
diff --git a/include/xen/interface/io/sndif.h b/include/xen/interface/io/sndif.h
index 2aac8f73614c..445657cdb1de 100644
--- a/include/xen/interface/io/sndif.h
+++ b/include/xen/interface/io/sndif.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* sndif.h
*
* Unified sound-device I/O interface for Xen guest OSes.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (C) 2013-2015 GlobalLogic Inc.
* Copyright (C) 2016-2017 EPAM Systems Inc.
*
diff --git a/include/xen/interface/io/vscsiif.h b/include/xen/interface/io/vscsiif.h
index d07d7aca8d1c..1f6047d3de44 100644
--- a/include/xen/interface/io/vscsiif.h
+++ b/include/xen/interface/io/vscsiif.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* vscsiif.h
*
* Based on the blkif.h code.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright(c) FUJITSU Limited 2008.
*/
diff --git a/include/xen/interface/io/xenbus.h b/include/xen/interface/io/xenbus.h
index fb8716112251..44456e2853fc 100644
--- a/include/xen/interface/io/xenbus.h
+++ b/include/xen/interface/io/xenbus.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/*****************************************************************************
* xenbus.h
*
diff --git a/include/xen/interface/io/xs_wire.h b/include/xen/interface/io/xs_wire.h
index 1517c7e93a3a..d40a44f09b16 100644
--- a/include/xen/interface/io/xs_wire.h
+++ b/include/xen/interface/io/xs_wire.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/*
* Details of the "wire" protocol between Xen Store Daemon and client
* library or guest kernel.
diff --git a/include/xen/interface/memory.h b/include/xen/interface/memory.h
index 447004861f00..1a371a825c55 100644
--- a/include/xen/interface/memory.h
+++ b/include/xen/interface/memory.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* memory.h
*
diff --git a/include/xen/interface/nmi.h b/include/xen/interface/nmi.h
index 73d9b0a2974e..b665fdbef1fd 100644
--- a/include/xen/interface/nmi.h
+++ b/include/xen/interface/nmi.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* nmi.h
*
diff --git a/include/xen/interface/physdev.h b/include/xen/interface/physdev.h
index 610dba9b620a..a237af867873 100644
--- a/include/xen/interface/physdev.h
+++ b/include/xen/interface/physdev.h
@@ -1,22 +1,4 @@
-/*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
+/* SPDX-License-Identifier: MIT */
#ifndef __XEN_PUBLIC_PHYSDEV_H__
#define __XEN_PUBLIC_PHYSDEV_H__
diff --git a/include/xen/interface/platform.h b/include/xen/interface/platform.h
index 732efb08c3e1..655d92e803e1 100644
--- a/include/xen/interface/platform.h
+++ b/include/xen/interface/platform.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* platform.h
*
* Hardware platform operations. Intended for use by domain-0 kernel.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2002-2006, K Fraser
*/
diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h
index a4c4d735d781..4dac0634ffff 100644
--- a/include/xen/interface/sched.h
+++ b/include/xen/interface/sched.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* sched.h
*
* Scheduler state interactions
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2005, Keir Fraser <keir@xensource.com>
*/
diff --git a/include/xen/interface/vcpu.h b/include/xen/interface/vcpu.h
index 504c71601511..c7cc28ad8d63 100644
--- a/include/xen/interface/vcpu.h
+++ b/include/xen/interface/vcpu.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* vcpu.h
*
* VCPU initialisation, query, and hotplug.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2005, Keir Fraser <keir@xensource.com>
*/
diff --git a/include/xen/interface/version.h b/include/xen/interface/version.h
index 8772b552c006..37d6588873d6 100644
--- a/include/xen/interface/version.h
+++ b/include/xen/interface/version.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* version.h
*
diff --git a/include/xen/interface/xen-mca.h b/include/xen/interface/xen-mca.h
index 7483a78d2425..464aa6b3a5f9 100644
--- a/include/xen/interface/xen-mca.h
+++ b/include/xen/interface/xen-mca.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* arch-x86/mca.h
* Guest OS machine check interface to x86 Xen.
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index 5ee37a296481..5e9916939268 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* xen.h
*
* Guest OS interface to Xen.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2004, K A Fraser
*/
diff --git a/include/xen/interface/xenpmu.h b/include/xen/interface/xenpmu.h
index ad603eab24b3..e2ee73d91bd6 100644
--- a/include/xen/interface/xenpmu.h
+++ b/include/xen/interface/xenpmu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __XEN_PUBLIC_XENPMU_H__
#define __XEN_PUBLIC_XENPMU_H__
diff --git a/include/xen/pci.h b/include/xen/pci.h
new file mode 100644
index 000000000000..b8337cf85fd1
--- /dev/null
+++ b/include/xen/pci.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __XEN_PCI_H__
+#define __XEN_PCI_H__
+
+#if defined(CONFIG_XEN_DOM0)
+int xen_find_device_domain_owner(struct pci_dev *dev);
+int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain);
+int xen_unregister_device_domain_owner(struct pci_dev *dev);
+#else
+static inline int xen_find_device_domain_owner(struct pci_dev *dev)
+{
+ return -1;
+}
+
+static inline int xen_register_device_domain_owner(struct pci_dev *dev,
+ uint16_t domain)
+{
+ return -1;
+}
+
+static inline int xen_unregister_device_domain_owner(struct pci_dev *dev)
+{
+ return -1;
+}
+#endif
+
+#endif
diff --git a/include/xen/xen.h b/include/xen/xen.h
index 43efba045acc..9f031b5faa54 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -52,13 +52,7 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
extern u64 xen_saved_max_mem_size;
#endif
-#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages);
void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages);
-#else
-#define xen_alloc_unpopulated_pages alloc_xenballooned_pages
-#define xen_free_unpopulated_pages free_xenballooned_pages
-#include <xen/balloon.h>
-#endif
#endif /* _XEN_XEN_H */