Diffstat (limited to 'include')
-rw-r--r--include/acpi/acconfig.h1
-rw-r--r--include/acpi/acoutput.h5
-rw-r--r--include/acpi/acpi_bus.h9
-rw-r--r--include/acpi/acpixf.h10
-rw-r--r--include/acpi/actbl1.h10
-rw-r--r--include/acpi/actbl2.h2
-rw-r--r--include/acpi/actbl3.h12
-rw-r--r--include/acpi/actypes.h1
-rw-r--r--include/acpi/cppc_acpi.h43
-rw-r--r--include/acpi/platform/acenv.h6
-rw-r--r--include/asm-generic/Kbuild1
-rw-r--r--include/asm-generic/mm_hooks.h11
-rw-r--r--include/asm-generic/mmzone.h5
-rw-r--r--include/asm-generic/numa.h8
-rw-r--r--include/asm-generic/uaccess.h2
-rw-r--r--include/asm-generic/vmlinux.lds.h5
-rw-r--r--include/crypto/chacha.h2
-rw-r--r--include/crypto/internal/ecc.h2
-rw-r--r--include/crypto/internal/poly1305.h2
-rw-r--r--include/crypto/internal/simd.h12
-rw-r--r--include/crypto/sha1_base.h2
-rw-r--r--include/crypto/sha256_base.h2
-rw-r--r--include/crypto/sha512_base.h2
-rw-r--r--include/crypto/sm3_base.h2
-rw-r--r--include/crypto/utils.h2
-rw-r--r--include/cxl/einj.h (renamed from include/linux/einj-cxl.h)0
-rw-r--r--include/cxl/event.h (renamed from include/linux/cxl-event.h)0
-rw-r--r--include/cxl/mailbox.h28
-rw-r--r--include/drm/display/drm_dp.h4
-rw-r--r--include/drm/display/drm_dp_helper.h3
-rw-r--r--include/drm/display/drm_dp_mst_helper.h14
-rw-r--r--include/drm/drm_accel.h21
-rw-r--r--include/drm/drm_atomic.h2
-rw-r--r--include/drm/drm_connector.h8
-rw-r--r--include/drm/drm_device.h5
-rw-r--r--include/drm/drm_drv.h28
-rw-r--r--include/drm/drm_edid.h2
-rw-r--r--include/drm/drm_fb_helper.h6
-rw-r--r--include/drm/drm_file.h5
-rw-r--r--include/drm/drm_fixed.h3
-rw-r--r--include/drm/drm_gem.h3
-rw-r--r--include/drm/drm_gem_dma_helper.h1
-rw-r--r--include/drm/drm_mipi_dsi.h12
-rw-r--r--include/drm/drm_mode_config.h16
-rw-r--r--include/drm/drm_panic.h21
-rw-r--r--include/drm/drm_prime.h3
-rw-r--r--include/drm/drm_print.h54
-rw-r--r--include/drm/drm_rect.h15
-rw-r--r--include/drm/drm_vblank.h37
-rw-r--r--include/drm/gpu_scheduler.h4
-rw-r--r--include/drm/ttm/ttm_bo.h48
-rw-r--r--include/drm/ttm/ttm_resource.h97
-rw-r--r--include/dt-bindings/arm/qcom,ids.h4
-rw-r--r--include/dt-bindings/clock/at91.h4
-rw-r--r--include/dt-bindings/clock/axg-audio-clkc.h7
-rw-r--r--include/dt-bindings/clock/cirrus,ep9301-syscon.h46
-rw-r--r--include/dt-bindings/clock/exynos7885.h32
-rw-r--r--include/dt-bindings/clock/exynos850.h1
-rw-r--r--include/dt-bindings/clock/nxp,imx95-clock.h3
-rw-r--r--include/dt-bindings/clock/px30-cru.h4
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8998.h5
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sc8180x.h5
-rw-r--r--include/dt-bindings/clock/qcom,sm4450-camcc.h106
-rw-r--r--include/dt-bindings/clock/qcom,sm4450-dispcc.h51
-rw-r--r--include/dt-bindings/clock/qcom,sm4450-gpucc.h62
-rw-r--r--include/dt-bindings/clock/qcom,sm8150-camcc.h135
l---------[-rw-r--r--]include/dt-bindings/clock/qcom,sm8650-dispcc.h103
-rw-r--r--include/dt-bindings/clock/renesas,r9a09g057-cpg.h21
-rw-r--r--include/dt-bindings/clock/rk3036-cru.h2
-rw-r--r--include/dt-bindings/clock/rk3228-cru.h2
-rw-r--r--include/dt-bindings/clock/rk3288-cru.h2
-rw-r--r--include/dt-bindings/clock/rk3308-cru.h2
-rw-r--r--include/dt-bindings/clock/rk3328-cru.h2
-rw-r--r--include/dt-bindings/clock/rk3368-cru.h2
-rw-r--r--include/dt-bindings/clock/rk3399-cru.h4
-rw-r--r--include/dt-bindings/clock/rockchip,rk3576-cru.h592
-rw-r--r--include/dt-bindings/clock/samsung,exynosautov9.h11
-rw-r--r--include/dt-bindings/clock/samsung,exynosautov920.h191
-rw-r--r--include/dt-bindings/iio/adi,ad4695.h9
-rw-r--r--include/dt-bindings/interconnect/qcom,ipq5332.h46
-rw-r--r--include/dt-bindings/interconnect/qcom,msm8937.h93
-rw-r--r--include/dt-bindings/interconnect/qcom,msm8976.h97
-rw-r--r--include/dt-bindings/interconnect/qcom,sm8350.h10
-rw-r--r--include/dt-bindings/interrupt-controller/arm-gic.h2
-rw-r--r--include/dt-bindings/mailbox/qcom-ipcc.h2
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-cv1800b.h63
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-cv1812h.h127
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-cv18xx.h19
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-sg2000.h127
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-sg2002.h79
-rw-r--r--include/dt-bindings/power/rockchip,rk3576-power.h30
-rw-r--r--include/dt-bindings/reset/rockchip,rk3576-cru.h564
-rw-r--r--include/dt-bindings/soc/qe-fsl,tsa.h13
-rw-r--r--include/keys/dns_resolver-type.h4
-rw-r--r--include/kunit/clk.h28
-rw-r--r--include/kunit/of.h115
-rw-r--r--include/kunit/platform_device.h20
-rw-r--r--include/kunit/visibility.h1
-rw-r--r--include/kvm/arm_pmu.h8
-rw-r--r--include/linux/acpi.h13
-rw-r--r--include/linux/acpi_pmtmr.h13
-rw-r--r--include/linux/alloc_tag.h26
-rw-r--r--include/linux/amba/bus.h2
-rw-r--r--include/linux/args.h6
-rw-r--r--include/linux/arm-smccc.h88
-rw-r--r--include/linux/arm_ffa.h12
-rw-r--r--include/linux/attribute_container.h6
-rw-r--r--include/linux/auxiliary_bus.h2
-rw-r--r--include/linux/bcma/bcma_driver_pci.h2
-rw-r--r--include/linux/bio.h4
-rw-r--r--include/linux/bitmap.h140
-rw-r--r--include/linux/bits.h15
-rw-r--r--include/linux/blk-integrity.h15
-rw-r--r--include/linux/blk-mq.h3
-rw-r--r--include/linux/blk_types.h7
-rw-r--r--include/linux/blkdev.h5
-rw-r--r--include/linux/bpf.h39
-rw-r--r--include/linux/bpf_lsm.h8
-rw-r--r--include/linux/bpf_verifier.h27
-rw-r--r--include/linux/btf.h5
-rw-r--r--include/linux/buffer_head.h17
-rw-r--r--include/linux/buildid.h4
-rw-r--r--include/linux/ceph/decode.h2
-rw-r--r--include/linux/ceph/libceph.h2
-rw-r--r--include/linux/ceph/osd_client.h2
-rw-r--r--include/linux/cgroup-defs.h25
-rw-r--r--include/linux/cgroup.h7
-rw-r--r--include/linux/cleanup.h138
-rw-r--r--include/linux/clk-provider.h14
-rw-r--r--include/linux/clk.h33
-rw-r--r--include/linux/closure.h35
-rw-r--r--include/linux/cma.h16
-rw-r--r--include/linux/compiler.h5
-rw-r--r--include/linux/compiler_types.h7
-rw-r--r--include/linux/console.h158
-rw-r--r--include/linux/context_tracking.h32
-rw-r--r--include/linux/context_tracking_state.h60
-rw-r--r--include/linux/coredump.h22
-rw-r--r--include/linux/coresight-pmu.h17
-rw-r--r--include/linux/coresight.h21
-rw-r--r--include/linux/cpufreq.h12
-rw-r--r--include/linux/cpuhotplug.h5
-rw-r--r--include/linux/cpumask.h212
-rw-r--r--include/linux/cpuset.h10
-rw-r--r--include/linux/damon.h3
-rw-r--r--include/linux/debugfs.h1
-rw-r--r--include/linux/decompress/unxz.h5
-rw-r--r--include/linux/device-mapper.h1
-rw-r--r--include/linux/device.h7
-rw-r--r--include/linux/device/bus.h6
-rw-r--r--include/linux/device/class.h2
-rw-r--r--include/linux/device/driver.h2
-rw-r--r--include/linux/dma-direct.h2
-rw-r--r--include/linux/dma-fence-array.h6
-rw-r--r--include/linux/dma-heap.h21
-rw-r--r--include/linux/dma-map-ops.h38
-rw-r--r--include/linux/dma-mapping.h25
-rw-r--r--include/linux/dma/ipu-dma.h174
-rw-r--r--include/linux/dma/k3-udma-glue.h2
-rw-r--r--include/linux/efi.h2
-rw-r--r--include/linux/entry-common.h2
-rw-r--r--include/linux/err.h9
-rw-r--r--include/linux/etherdevice.h2
-rw-r--r--include/linux/f2fs_fs.h4
-rw-r--r--include/linux/falloc.h18
-rw-r--r--include/linux/fault-inject.h36
-rw-r--r--include/linux/fb.h2
-rw-r--r--include/linux/fdtable.h8
-rw-r--r--include/linux/file.h55
-rw-r--r--include/linux/filelock.h14
-rw-r--r--include/linux/filter.h10
-rw-r--r--include/linux/find.h50
-rw-r--r--include/linux/firewire.h22
-rw-r--r--include/linux/firmware/imx/sm.h23
-rw-r--r--include/linux/folio_queue.h324
-rw-r--r--include/linux/fs.h219
-rw-r--r--include/linux/fsl/enetc_mdio.h3
-rw-r--r--include/linux/fsl/mc.h2
-rw-r--r--include/linux/fsnotify_backend.h10
-rw-r--r--include/linux/generic-radix-tree.h105
-rw-r--r--include/linux/gfp.h25
-rw-r--r--include/linux/gfp_types.h8
-rw-r--r--include/linux/gpio.h12
-rw-r--r--include/linux/hdmi.h9
-rw-r--r--include/linux/hid.h12
-rw-r--r--include/linux/hid_bpf.h2
-rw-r--r--include/linux/hidraw.h1
-rw-r--r--include/linux/huge_mm.h158
-rw-r--r--include/linux/hugetlb.h27
-rw-r--r--include/linux/hwmon.h1
-rw-r--r--include/linux/i2c.h3
-rw-r--r--include/linux/i3c/master.h16
-rw-r--r--include/linux/i8253.h2
-rw-r--r--include/linux/ieee80211.h2
-rw-r--r--include/linux/iio/backend.h62
-rw-r--r--include/linux/iio/iio.h39
-rw-r--r--include/linux/input/matrix_keypad.h48
-rw-r--r--include/linux/intel_vsec.h149
-rw-r--r--include/linux/interrupt.h6
-rw-r--r--include/linux/io-pgtable.h4
-rw-r--r--include/linux/io_uring/cmd.h15
-rw-r--r--include/linux/io_uring_types.h3
-rw-r--r--include/linux/iomap.h13
-rw-r--r--include/linux/iommu-dma.h69
-rw-r--r--include/linux/iommufd.h12
-rw-r--r--include/linux/ioprio.h2
-rw-r--r--include/linux/ioremap.h1
-rw-r--r--include/linux/iov_iter.h104
-rw-r--r--include/linux/irq.h6
-rw-r--r--include/linux/irqchip/riscv-imsic.h9
-rw-r--r--include/linux/irqdomain.h8
-rw-r--r--include/linux/jbd2.h4
-rw-r--r--include/linux/jiffies.h2
-rw-r--r--include/linux/kasan.h63
-rw-r--r--include/linux/kernel-page-flags.h3
-rw-r--r--include/linux/key.h3
-rw-r--r--include/linux/kfence.h2
-rw-r--r--include/linux/khugepaged.h1
-rw-r--r--include/linux/kmsg_dump.h22
-rw-r--r--include/linux/kprobes.h9
-rw-r--r--include/linux/kvm_host.h18
-rw-r--r--include/linux/leds.h2
-rw-r--r--include/linux/libata.h157
-rw-r--r--include/linux/lockd/lockd.h2
-rw-r--r--include/linux/lru_cache.h4
-rw-r--r--include/linux/lsm_count.h135
-rw-r--r--include/linux/lsm_hook_defs.h22
-rw-r--r--include/linux/lsm_hooks.h129
-rw-r--r--include/linux/maple_tree.h20
-rw-r--r--include/linux/memblock.h1
-rw-r--r--include/linux/memcontrol.h67
-rw-r--r--include/linux/memory_hotplug.h48
-rw-r--r--include/linux/mfd/88pm80x.h2
-rw-r--r--include/linux/mfd/adp5585.h126
-rw-r--r--include/linux/mfd/axp20x.h27
-rw-r--r--include/linux/mfd/ds1wm.h29
-rw-r--r--include/linux/mfd/max77693-private.h5
-rw-r--r--include/linux/migrate.h3
-rw-r--r--include/linux/mlx5/device.h31
-rw-r--r--include/linux/mlx5/driver.h2
-rw-r--r--include/linux/mlx5/mlx5_ifc.h115
-rw-r--r--include/linux/mm.h339
-rw-r--r--include/linux/mm_types.h22
-rw-r--r--include/linux/mm_types_task.h3
-rw-r--r--include/linux/mmc/core.h12
-rw-r--r--include/linux/mmc/host.h28
-rw-r--r--include/linux/mmzone.h35
-rw-r--r--include/linux/mnt_idmapping.h1
-rw-r--r--include/linux/mnt_namespace.h4
-rw-r--r--include/linux/mpi.h192
-rw-r--r--include/linux/msi.h2
-rw-r--r--include/linux/mtd/map.h2
-rw-r--r--include/linux/mtd/nand.h90
-rw-r--r--include/linux/mtd/spinand.h19
-rw-r--r--include/linux/mutex.h19
-rw-r--r--include/linux/net.h19
-rw-r--r--include/linux/netdevice.h30
-rw-r--r--include/linux/netfilter.h4
-rw-r--r--include/linux/netfs.h46
-rw-r--r--include/linux/nfs.h9
-rw-r--r--include/linux/nfs4.h17
-rw-r--r--include/linux/nfs_common.h17
-rw-r--r--include/linux/nfs_fs_sb.h14
-rw-r--r--include/linux/nfs_xdr.h22
-rw-r--r--include/linux/nfslocalio.h89
-rw-r--r--include/linux/nodemask.h86
-rw-r--r--include/linux/numa.h8
-rw-r--r--include/linux/numa_memblks.h58
-rw-r--r--include/linux/nvme-keyring.h6
-rw-r--r--include/linux/nvme-rdma.h6
-rw-r--r--include/linux/nvme.h8
-rw-r--r--include/linux/omap-gpmc.h10
-rw-r--r--include/linux/page-flags.h202
-rw-r--r--include/linux/page_counter.h27
-rw-r--r--include/linux/pagemap.h126
-rw-r--r--include/linux/pagewalk.h58
-rw-r--r--include/linux/path.h6
-rw-r--r--include/linux/pci-ats.h3
-rw-r--r--include/linux/pci-epc.h3
-rw-r--r--include/linux/pci.h11
-rw-r--r--include/linux/pci_ids.h6
-rw-r--r--include/linux/percpu-rwsem.h2
-rw-r--r--include/linux/percpu.h1
-rw-r--r--include/linux/perf/arm_pmu.h10
-rw-r--r--include/linux/perf/arm_pmuv3.h9
-rw-r--r--include/linux/perf_event.h40
-rw-r--r--include/linux/pgalloc_tag.h31
-rw-r--r--include/linux/pgtable.h18
-rw-r--r--include/linux/pinctrl/pinconf-generic.h3
-rw-r--r--include/linux/platform_data/ad5449.h39
-rw-r--r--include/linux/platform_data/amd_qdma.h36
-rw-r--r--include/linux/platform_data/cyttsp4.h62
-rw-r--r--include/linux/platform_data/dma-ep93xx.h94
-rw-r--r--include/linux/platform_data/eth-ep93xx.h10
-rw-r--r--include/linux/platform_data/gpio-ath79.h16
-rw-r--r--include/linux/platform_data/gpio-davinci.h21
-rw-r--r--include/linux/platform_data/keypad-ep93xx.h32
-rw-r--r--include/linux/platform_data/keypad-nomadik-ske.h50
-rw-r--r--include/linux/platform_data/max6697.h33
-rw-r--r--include/linux/platform_data/mcs.h30
-rw-r--r--include/linux/platform_data/mtd-davinci-aemif.h36
-rw-r--r--include/linux/platform_data/mtd-davinci.h88
-rw-r--r--include/linux/platform_data/spi-ep93xx.h15
-rw-r--r--include/linux/platform_data/ti-aemif.h45
-rw-r--r--include/linux/platform_data/x86/asus-wmi.h37
-rw-r--r--include/linux/platform_data/x86/intel-mid_wdt.h (renamed from include/linux/platform_data/intel-mid_wdt.h)6
-rw-r--r--include/linux/platform_data/x86/intel_scu_ipc.h68
-rw-r--r--include/linux/platform_data/zforce_ts.h15
-rw-r--r--include/linux/platform_device.h2
-rw-r--r--include/linux/pm_domain.h16
-rw-r--r--include/linux/posix-timers.h2
-rw-r--r--include/linux/power_supply.h3
-rw-r--r--include/linux/printk.h33
-rw-r--r--include/linux/prmt.h5
-rw-r--r--include/linux/pstore.h2
-rw-r--r--include/linux/ptp_classify.h2
-rw-r--r--include/linux/pwm.h10
-rw-r--r--include/linux/quota.h2
-rw-r--r--include/linux/ratelimit_types.h2
-rw-r--r--include/linux/rbtree.h67
-rw-r--r--include/linux/rcu_segcblist.h6
-rw-r--r--include/linux/rculist.h9
-rw-r--r--include/linux/rcupdate.h15
-rw-r--r--include/linux/rcutiny.h7
-rw-r--r--include/linux/rcutree.h3
-rw-r--r--include/linux/regmap.h4
-rw-r--r--include/linux/ring_buffer.h20
-rw-r--r--include/linux/rmap.h11
-rw-r--r--include/linux/rpmb.h123
-rw-r--r--include/linux/sbitmap.h2
-rw-r--r--include/linux/sched.h38
-rw-r--r--include/linux/sched/deadline.h14
-rw-r--r--include/linux/sched/ext.h215
-rw-r--r--include/linux/sched/mm.h44
-rw-r--r--include/linux/sched/prio.h1
-rw-r--r--include/linux/sched/rt.h33
-rw-r--r--include/linux/sched/signal.h7
-rw-r--r--include/linux/sched/task.h8
-rw-r--r--include/linux/sched/task_stack.h18
-rw-r--r--include/linux/scmi_imx_protocol.h59
-rw-r--r--include/linux/security.h63
-rw-r--r--include/linux/seqlock.h25
-rw-r--r--include/linux/serial_8250.h2
-rw-r--r--include/linux/serial_core.h117
-rw-r--r--include/linux/serial_s3c.h24
-rw-r--r--include/linux/set_memory.h8
-rw-r--r--include/linux/shmem_fs.h15
-rw-r--r--include/linux/slab.h245
-rw-r--r--include/linux/smp.h8
-rw-r--r--include/linux/soc/cirrus/ep93xx.h47
-rw-r--r--include/linux/soc/qcom/geni-se.h9
-rw-r--r--include/linux/soundwire/sdw.h2
-rw-r--r--include/linux/soundwire/sdw_intel.h8
-rw-r--r--include/linux/spi/spi.h12
-rw-r--r--include/linux/spi/spi_bitbang.h1
-rw-r--r--include/linux/srcutree.h15
-rw-r--r--include/linux/string.h24
-rw-r--r--include/linux/string_choices.h29
-rw-r--r--include/linux/sunrpc/sched.h16
-rw-r--r--include/linux/sunrpc/svc.h51
-rw-r--r--include/linux/sunrpc/svc_rdma.h2
-rw-r--r--include/linux/sunrpc/svcauth.h6
-rw-r--r--include/linux/sunrpc/svcsock.h2
-rw-r--r--include/linux/sunrpc/xdr.h2
-rw-r--r--include/linux/sunrpc/xdrgen/_builtins.h243
-rw-r--r--include/linux/sunrpc/xdrgen/_defs.h26
-rw-r--r--include/linux/swap.h44
-rw-r--r--include/linux/syscalls.h2
-rw-r--r--include/linux/tee_core.h12
-rw-r--r--include/linux/thermal.h31
-rw-r--r--include/linux/timekeeper_internal.h2
-rw-r--r--include/linux/tpm.h2
-rw-r--r--include/linux/tracepoint.h20
-rw-r--r--include/linux/uaccess.h7
-rw-r--r--include/linux/ubsan.h5
-rw-r--r--include/linux/uio.h18
-rw-r--r--include/linux/unaligned.h (renamed from include/asm-generic/unaligned.h)17
-rw-r--r--include/linux/union_find.h41
-rw-r--r--include/linux/unroll.h36
-rw-r--r--include/linux/uprobes.h48
-rw-r--r--include/linux/usb.h8
-rw-r--r--include/linux/usb/composite.h2
-rw-r--r--include/linux/usb/func_utils.h86
-rw-r--r--include/linux/usb/gadget_configfs.h7
-rw-r--r--include/linux/usb/serial.h7
-rw-r--r--include/linux/usb/tcpci.h31
-rw-r--r--include/linux/usb/usbnet.h15
-rw-r--r--include/linux/user_namespace.h6
-rw-r--r--include/linux/userfaultfd_k.h19
-rw-r--r--include/linux/vdpa.h9
-rw-r--r--include/linux/virtio_net.h4
-rw-r--r--include/linux/vm_event_item.h26
-rw-r--r--include/linux/vmalloc.h4
-rw-r--r--include/linux/vmstat.h1
-rw-r--r--include/linux/workqueue.h41
-rw-r--r--include/linux/writeback.h10
-rw-r--r--include/linux/xz.h81
-rw-r--r--include/linux/zstd.h167
-rw-r--r--include/linux/zswap.h16
-rw-r--r--include/media/cec.h33
-rw-r--r--include/media/rc-core.h2
-rw-r--r--include/media/v4l2-mc.h3
-rw-r--r--include/media/v4l2-subdev.h6
-rw-r--r--include/media/videobuf2-core.h3
-rw-r--r--include/net/bluetooth/l2cap.h2
-rw-r--r--include/net/calipso.h2
-rw-r--r--include/net/cipso_ipv4.h2
-rw-r--r--include/net/genetlink.h3
-rw-r--r--include/net/ieee80211_radiotap.h2
-rw-r--r--include/net/mac80211.h2
-rw-r--r--include/net/mac802154.h2
-rw-r--r--include/net/mctp.h2
-rw-r--r--include/net/netfilter/nf_tables.h2
-rw-r--r--include/net/rtnetlink.h17
-rw-r--r--include/net/sch_generic.h1
-rw-r--r--include/net/sock.h2
-rw-r--r--include/net/tcp.h21
-rw-r--r--include/rdma/ib_hdrs.h2
-rw-r--r--include/rdma/ib_umem.h18
-rw-r--r--include/rdma/ib_verbs.h4
-rw-r--r--include/rdma/iba.h2
-rw-r--r--include/rdma/rdma_netlink.h12
-rw-r--r--include/scsi/fcoe_sysfs.h2
-rw-r--r--include/scsi/scsi_dbg.h7
-rw-r--r--include/scsi/scsi_host.h1
-rw-r--r--include/scsi/scsi_transport_fc.h8
-rw-r--r--include/soc/fsl/qe/qe.h23
-rw-r--r--include/sound/aci.h1
-rw-r--r--include/sound/asoundef.h6
-rw-r--r--include/sound/control.h27
-rw-r--r--include/sound/core.h69
-rw-r--r--include/sound/cs35l56.h6
-rw-r--r--include/sound/es1688.h1
-rw-r--r--include/sound/hdaudio.h2
-rw-r--r--include/sound/memalloc.h7
-rw-r--r--include/sound/pcm.h49
-rw-r--r--include/sound/seq_kernel.h4
-rw-r--r--include/sound/snd_wavefront.h4
-rw-r--r--include/sound/soc-acpi-intel-match.h2
-rw-r--r--include/sound/soc-acpi.h6
-rw-r--r--include/sound/soc-card.h2
-rw-r--r--include/sound/soc-component.h3
-rw-r--r--include/sound/soc-dai.h6
-rw-r--r--include/sound/soc-dpcm.h19
-rw-r--r--include/sound/soc.h7
-rw-r--r--include/sound/soc_sdw_utils.h247
-rw-r--r--include/sound/soundfont.h6
-rw-r--r--include/sound/tas2563-tlv.h279
-rw-r--r--include/sound/tas2781-tlv.h260
-rw-r--r--include/sound/tas2781.h71
-rw-r--r--include/sound/ump.h12
-rw-r--r--include/sound/vx_core.h1
-rw-r--r--include/target/target_core_backend.h2
-rw-r--r--include/trace/events/asoc.h3
-rw-r--r--include/trace/events/btrfs.h20
-rw-r--r--include/trace/events/dma.h342
-rw-r--r--include/trace/events/ext4.h1
-rw-r--r--include/trace/events/f2fs.h3
-rw-r--r--include/trace/events/filemap.h84
-rw-r--r--include/trace/events/firewire.h4
-rw-r--r--include/trace/events/intel_ifs.h27
-rw-r--r--include/trace/events/mmflags.h40
-rw-r--r--include/trace/events/netfs.h147
-rw-r--r--include/trace/events/oom.h4
-rw-r--r--include/trace/events/pwm.h10
-rw-r--r--include/trace/events/rcu.h20
-rw-r--r--include/trace/events/rpcrdma.h23
-rw-r--r--include/trace/events/sched_ext.h32
-rw-r--r--include/trace/events/ufs.h399
-rw-r--r--include/trace/events/writeback.h10
-rw-r--r--include/trace/misc/nfs.h1
-rw-r--r--include/uapi/drm/drm_fourcc.h25
-rw-r--r--include/uapi/drm/drm_mode.h2
-rw-r--r--include/uapi/drm/msm_drm.h2
-rw-r--r--include/uapi/drm/xe_drm.h10
-rw-r--r--include/uapi/linux/android/binder.h36
-rw-r--r--include/uapi/linux/audit.h3
-rw-r--r--include/uapi/linux/auto_fs.h2
-rw-r--r--include/uapi/linux/bits.h3
-rw-r--r--include/uapi/linux/blkdev.h14
-rw-r--r--include/uapi/linux/bpf.h18
-rw-r--r--include/uapi/linux/cec.h9
-rw-r--r--include/uapi/linux/const.h17
-rw-r--r--include/uapi/linux/elf.h2
-rw-r--r--include/uapi/linux/exfat.h25
-rw-r--r--include/uapi/linux/falloc.h1
-rw-r--r--include/uapi/linux/fcntl.h84
-rw-r--r--include/uapi/linux/fuse.h22
-rw-r--r--include/uapi/linux/hidraw.h1
-rw-r--r--include/uapi/linux/io_uring.h42
-rw-r--r--include/uapi/linux/iommufd.h2
-rw-r--r--include/uapi/linux/kernel-page-flags.h2
-rw-r--r--include/uapi/linux/kfd_ioctl.h106
-rw-r--r--include/uapi/linux/landlock.h30
-rw-r--r--include/uapi/linux/lsm.h1
-rw-r--r--include/uapi/linux/nbd.h8
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h2
-rw-r--r--include/uapi/linux/nsfs.h15
-rw-r--r--include/uapi/linux/pci_regs.h41
-rw-r--r--include/uapi/linux/rkisp1-config.h578
-rw-r--r--include/uapi/linux/sched.h1
-rw-r--r--include/uapi/linux/sched/types.h6
-rw-r--r--include/uapi/linux/serio.h1
-rw-r--r--include/uapi/linux/spi/spi.h5
-rw-r--r--include/uapi/linux/usb/ch9.h8
-rw-r--r--include/uapi/linux/usb/functionfs.h97
-rw-r--r--include/uapi/linux/usb/g_hid.h40
-rw-r--r--include/uapi/linux/usb/gadgetfs.h2
-rw-r--r--include/uapi/linux/vbox_vmmdev_types.h5
-rw-r--r--include/uapi/linux/vdpa.h1
-rw-r--r--include/uapi/linux/videodev2.h2
-rw-r--r--include/uapi/linux/virtio_balloon.h16
-rw-r--r--include/uapi/linux/virtio_gpu.h1
-rw-r--r--include/uapi/rdma/bnxt_re-abi.h13
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_cmds.h9
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_verbs.h4
-rw-r--r--include/uapi/rdma/rdma_netlink.h16
-rw-r--r--include/uapi/sound/asequencer.h2
-rw-r--r--include/uapi/sound/asound.h17
-rw-r--r--include/uapi/xen/privcmd.h7
-rw-r--r--include/ufs/ufs.h4
-rw-r--r--include/ufs/ufshcd.h1
-rw-r--r--include/ufs/ufshci.h5
-rw-r--r--include/vdso/getrandom.h28
-rw-r--r--include/vdso/helpers.h1
-rw-r--r--include/vdso/unaligned.h15
-rw-r--r--include/video/vga.h58
-rw-r--r--include/xen/acpi.h27
-rw-r--r--include/xen/interface/elfnote.h93
-rw-r--r--include/xen/interface/physdev.h17
-rw-r--r--include/xen/pci.h6
531 files changed, 12759 insertions, 4269 deletions
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index d768d9c568cf..2da5f4a6e814 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -67,7 +67,6 @@
* General Purpose Events (GPEs)
* Global Lock
* ACPI PM timer
- * FACS table (Waking vectors and Global Lock)
*/
#ifndef ACPI_REDUCED_HARDWARE
#define ACPI_REDUCED_HARDWARE FALSE
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index b1571dd96310..5e0346142f98 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -193,6 +193,7 @@
*/
#ifndef ACPI_NO_ERROR_MESSAGES
#define AE_INFO _acpi_module_name, __LINE__
+#define ACPI_ONCE(_fn, _plist) { static char _done; if (!_done) { _done = 1; _fn _plist; } }
/*
* Error reporting. Callers module and line number are inserted by AE_INFO,
@@ -201,8 +202,10 @@
*/
#define ACPI_INFO(plist) acpi_info plist
#define ACPI_WARNING(plist) acpi_warning plist
+#define ACPI_WARNING_ONCE(plist) ACPI_ONCE(acpi_warning, plist)
#define ACPI_EXCEPTION(plist) acpi_exception plist
#define ACPI_ERROR(plist) acpi_error plist
+#define ACPI_ERROR_ONCE(plist) ACPI_ONCE(acpi_error, plist)
#define ACPI_BIOS_WARNING(plist) acpi_bios_warning plist
#define ACPI_BIOS_EXCEPTION(plist) acpi_bios_exception plist
#define ACPI_BIOS_ERROR(plist) acpi_bios_error plist
@@ -214,8 +217,10 @@
#define ACPI_INFO(plist)
#define ACPI_WARNING(plist)
+#define ACPI_WARNING_ONCE(plist)
#define ACPI_EXCEPTION(plist)
#define ACPI_ERROR(plist)
+#define ACPI_ERROR_ONCE(plist)
#define ACPI_BIOS_WARNING(plist)
#define ACPI_BIOS_EXCEPTION(plist)
#define ACPI_BIOS_ERROR(plist)
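
Editor's note: a minimal usage sketch, not part of the patch. The new _ONCE wrappers take the same double-parenthesized argument list as ACPI_ERROR()/ACPI_WARNING(); ACPI_ONCE() latches a static flag so the message is emitted only on the first call. The check_table_length() call site below is hypothetical.

static void check_table_length(u32 length)
{
	if (length < sizeof(struct acpi_table_header))
		ACPI_ERROR_ONCE((AE_INFO,
				 "Invalid table length 0x%X", length));
}
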
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 8db5bd382915..b2e377b7f337 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -228,10 +228,12 @@ struct acpi_device_dir {
/* Plug and Play */
+#define MAX_ACPI_DEVICE_NAME_LEN 40
+#define MAX_ACPI_CLASS_NAME_LEN 20
typedef char acpi_bus_id[8];
typedef u64 acpi_bus_address;
-typedef char acpi_device_name[40];
-typedef char acpi_device_class[20];
+typedef char acpi_device_name[MAX_ACPI_DEVICE_NAME_LEN];
+typedef char acpi_device_class[MAX_ACPI_CLASS_NAME_LEN];
struct acpi_hardware_id {
struct list_head list;
@@ -255,7 +257,6 @@ struct acpi_device_pnp {
struct list_head ids; /* _HID and _CIDs */
acpi_device_name device_name; /* Driver-determined */
acpi_device_class device_class; /* " */
- union acpi_object *str_obj; /* unicode string for _STR method */
};
#define acpi_device_bid(d) ((d)->pnp.bus_id)
@@ -993,6 +994,8 @@ static inline void acpi_put_acpi_dev(struct acpi_device *adev)
int acpi_wait_for_acpi_ipmi(void);
+int acpi_scan_add_dep(acpi_handle handle, struct acpi_handle_list *dep_devices);
+u32 arch_acpi_add_auto_dep(acpi_handle handle);
#else /* CONFIG_ACPI */
static inline int register_acpi_bus_type(void *bus) { return 0; }
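
Editor's note: an illustrative, hypothetical call site for the new length macros — they let callers bound copies into the pnp name/class buffers explicitly instead of hard-coding 40 and 20.

static void example_set_names(struct acpi_device *adev)
{
	strscpy(acpi_device_name(adev), "Example Device",
		MAX_ACPI_DEVICE_NAME_LEN);
	strscpy(acpi_device_class(adev), "example",
		MAX_ACPI_CLASS_NAME_LEN);
}
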
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 9f1c1d225e32..d076ebd19a61 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20240322
+#define ACPI_CA_VERSION 0x20240827
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -878,10 +878,10 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_leave_sleep_state_prep(u8 sleep_state))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_leave_sleep_state(u8 sleep_state))
-ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
- acpi_set_firmware_waking_vector
- (acpi_physical_address physical_address,
- acpi_physical_address physical_address64))
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ acpi_set_firmware_waking_vector
+ (acpi_physical_address physical_address,
+ acpi_physical_address physical_address64))
/*
* ACPI Timer interfaces
*/
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 841ef9f22795..199afc2cd122 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -567,6 +567,10 @@ struct acpi_cedt_cxims {
u64 xormap_list[];
};
+struct acpi_cedt_cxims_target_element {
+ u64 xormap;
+};
+
/* 3: CXL RCEC Downstream Port Association Structure */
struct acpi_cedt_rdpas {
@@ -751,6 +755,7 @@ struct acpi_dbg2_device {
#define ACPI_DBG2_16550_WITH_GAS 0x0012
#define ACPI_DBG2_SDM845_7_372MHZ 0x0013
#define ACPI_DBG2_INTEL_LPSS 0x0014
+#define ACPI_DBG2_RISCV_SBI_CON 0x0015
#define ACPI_DBG2_1394_STANDARD 0x0000
@@ -1791,7 +1796,7 @@ struct acpi_hmat_cache {
u32 reserved1;
u64 cache_size;
u32 cache_attributes;
- u16 reserved2;
+ u16 address_mode;
u16 number_of_SMBIOShandles;
};
@@ -1803,6 +1808,9 @@ struct acpi_hmat_cache {
#define ACPI_HMAT_WRITE_POLICY (0x0000F000)
#define ACPI_HMAT_CACHE_LINE_SIZE (0xFFFF0000)
+#define ACPI_HMAT_CACHE_MODE_UNKNOWN (0)
+#define ACPI_HMAT_CACHE_MODE_EXTENDED_LINEAR (1)
+
/* Values for cache associativity flag */
#define ACPI_HMAT_CA_NONE (0)
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index e27958ef8264..d3858eebc255 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -1607,7 +1607,7 @@ struct acpi_mpam_msc_node {
u32 max_nrdy_usec;
u64 hardware_id_linked_device;
u32 instance_id_linked_device;
- u32 num_resouce_nodes;
+ u32 num_resource_nodes;
};
struct acpi_table_mpam {
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 8f775e3a08fd..5cd755143b7d 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -92,10 +92,10 @@ struct acpi_table_slit {
/*******************************************************************************
*
* SPCR - Serial Port Console Redirection table
- * Version 2
+ * Version 4
*
* Conforms to "Serial Port Console Redirection Table",
- * Version 1.03, August 10, 2015
+ * Version 1.10, Jan 5, 2023
*
******************************************************************************/
@@ -112,7 +112,7 @@ struct acpi_table_spcr {
u8 stop_bits;
u8 flow_control;
u8 terminal_type;
- u8 reserved1;
+ u8 language;
u16 pci_device_id;
u16 pci_vendor_id;
u8 pci_bus;
@@ -120,7 +120,11 @@ struct acpi_table_spcr {
u8 pci_function;
u32 pci_flags;
u8 pci_segment;
- u32 reserved2;
+ u32 uart_clk_freq;
+ u32 precise_baudrate;
+ u16 name_space_string_length;
+ u16 name_space_string_offset;
+ char name_space_string[];
};
/* Masks for pci_flags field above */
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 85c2dcf2b704..80767e8bf3ad 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -1311,6 +1311,7 @@ typedef enum {
#define ACPI_OSI_WIN_10_19H1 0x14
#define ACPI_OSI_WIN_10_20H1 0x15
#define ACPI_OSI_WIN_11 0x16
+#define ACPI_OSI_WIN_11_22H2 0x17
/* Definitions of getopt */
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index 930b6afba6f4..76e44e102780 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -64,6 +64,8 @@ struct cpc_desc {
int cpu_id;
int write_cmd_status;
int write_cmd_id;
+ /* Lock used for RMW operations in cpc_write() */
+ spinlock_t rmw_lock;
struct cpc_register_resource cpc_regs[MAX_CPC_REG_ENT];
struct acpi_psd_package domain_info;
struct kobject kobj;
@@ -159,34 +161,37 @@ extern int cppc_get_epp_perf(int cpunum, u64 *epp_perf);
extern int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable);
extern int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps);
extern int cppc_set_auto_sel(int cpu, bool enable);
+extern int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf);
+extern int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator);
+extern int amd_detect_prefcore(bool *detected);
#else /* !CONFIG_ACPI_CPPC_LIB */
static inline int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_get_highest_perf(int cpunum, u64 *highest_perf)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_set_enable(int cpu, bool enable)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline bool cppc_perf_ctrs_in_pcc(void)
{
@@ -210,27 +215,39 @@ static inline bool cpc_ffh_supported(void)
}
static inline int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_set_auto_sel(int cpu, bool enable)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+}
+static inline int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf)
+{
+ return -ENODEV;
+}
+static inline int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator)
+{
+ return -EOPNOTSUPP;
+}
+static inline int amd_detect_prefcore(bool *detected)
+{
+ return -ENODEV;
}
#endif /* !CONFIG_ACPI_CPPC_LIB */
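
Editor's note: a hedged sketch of a caller for the newly declared AMD helpers; example_get_boost_numerator() is hypothetical. With CONFIG_ACPI_CPPC_LIB disabled the stubs above return -EOPNOTSUPP/-ENODEV, so the same error path covers that configuration.

static int example_get_boost_numerator(unsigned int cpu)
{
	u64 numerator;
	int ret;

	ret = amd_get_boost_ratio_numerator(cpu, &numerator);
	if (ret)
		return ret;	/* e.g. -EOPNOTSUPP without CPPC support */

	pr_info("cpu%u boost ratio numerator: %llu\n", cpu, numerator);
	return 0;
}
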
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 337ffa931ee8..3f31df09a9d6 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -252,6 +252,12 @@
#define ACPI_RELEASE_GLOBAL_LOCK(Glptr, pending) pending = 0
#endif
+/* NULL/invalid value to use for destroyed or not-yet-created semaphores. */
+
+#ifndef ACPI_SEMAPHORE_NULL
+#define ACPI_SEMAPHORE_NULL NULL
+#endif
+
/* Flush CPU cache - used when going to sleep. Wbinvd or similar. */
#ifndef ACPI_FLUSH_CPU_CACHE
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index 620b6da429d4..1b43c3a77012 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -58,7 +58,6 @@ mandatory-y += tlbflush.h
mandatory-y += topology.h
mandatory-y += trace_clock.h
mandatory-y += uaccess.h
-mandatory-y += unaligned.h
mandatory-y += vermagic.h
mandatory-y += vga.h
mandatory-y += video.h
diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h
index 4dbb177d1150..6eea3b3c1e65 100644
--- a/include/asm-generic/mm_hooks.h
+++ b/include/asm-generic/mm_hooks.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Define generic no-op hooks for arch_dup_mmap, arch_exit_mmap
- * and arch_unmap to be included in asm-FOO/mmu_context.h for any
- * arch FOO which doesn't need to hook these.
+ * Define generic no-op hooks for arch_dup_mmap and arch_exit_mmap
+ * to be included in asm-FOO/mmu_context.h for any arch FOO which
+ * doesn't need to hook these.
*/
#ifndef _ASM_GENERIC_MM_HOOKS_H
#define _ASM_GENERIC_MM_HOOKS_H
@@ -17,11 +17,6 @@ static inline void arch_exit_mmap(struct mm_struct *mm)
{
}
-static inline void arch_unmap(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
-}
-
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
bool write, bool execute, bool foreign)
{
diff --git a/include/asm-generic/mmzone.h b/include/asm-generic/mmzone.h
new file mode 100644
index 000000000000..2ab5193e8394
--- /dev/null
+++ b/include/asm-generic/mmzone.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_MMZONE_H
+#define _ASM_GENERIC_MMZONE_H
+
+#endif
diff --git a/include/asm-generic/numa.h b/include/asm-generic/numa.h
index c32e0cf23c90..e063d6487f66 100644
--- a/include/asm-generic/numa.h
+++ b/include/asm-generic/numa.h
@@ -32,10 +32,8 @@ static inline const struct cpumask *cpumask_of_node(int node)
void __init arch_numa_init(void);
int __init numa_add_memblk(int nodeid, u64 start, u64 end);
-void __init numa_set_distance(int from, int to, int distance);
-void __init numa_free_distance(void);
void __init early_map_cpu_to_node(unsigned int cpu, int nid);
-int __init early_cpu_to_node(int cpu);
+int early_cpu_to_node(int cpu);
void numa_store_cpu_info(unsigned int cpu);
void numa_add_cpu(unsigned int cpu);
void numa_remove_cpu(unsigned int cpu);
@@ -51,4 +49,8 @@ static inline int early_cpu_to_node(int cpu) { return 0; }
#endif /* CONFIG_NUMA */
+#ifdef CONFIG_NUMA_EMU
+void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable);
+#endif
+
#endif /* __ASM_GENERIC_NUMA_H */
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index a5be9e61a2a2..b276f783494c 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -11,7 +11,7 @@
#include <asm-generic/access_ok.h>
#ifdef CONFIG_UACCESS_MEMCPY
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
static __always_inline int
__get_user_fn(size_t size, const void __user *from, void *to)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 1ae44793132a..eeadbaeccf88 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -133,6 +133,7 @@
*(__dl_sched_class) \
*(__rt_sched_class) \
*(__fair_sched_class) \
+ *(__ext_sched_class) \
*(__idle_sched_class) \
__sched_class_lowest = .;
@@ -918,6 +919,10 @@
#define RUNTIME_CONST(t,x) NAMED_SECTION(runtime_##t##_##x)
+#define RUNTIME_CONST_VARIABLES \
+ RUNTIME_CONST(shift, d_hash_shift) \
+ RUNTIME_CONST(ptr, dentry_hashtable)
+
/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
#define KUNIT_TABLE() \
. = ALIGN(8); \
diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
index b3ea73b81944..5bae6a55b333 100644
--- a/include/crypto/chacha.h
+++ b/include/crypto/chacha.h
@@ -15,7 +15,7 @@
#ifndef _CRYPTO_CHACHA_H
#define _CRYPTO_CHACHA_H
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/types.h>
/* 32-bit stream position, then 96-bit nonce (RFC7539 convention) */
diff --git a/include/crypto/internal/ecc.h b/include/crypto/internal/ecc.h
index 0717a53ae732..065f00e4bf40 100644
--- a/include/crypto/internal/ecc.h
+++ b/include/crypto/internal/ecc.h
@@ -27,7 +27,7 @@
#define _CRYPTO_ECC_H
#include <crypto/ecc_curve.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
/* One digit is u64 qword. */
#define ECC_CURVE_NIST_P192_DIGITS 3
diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h
index 196aa769f296..e614594f88c1 100644
--- a/include/crypto/internal/poly1305.h
+++ b/include/crypto/internal/poly1305.h
@@ -6,7 +6,7 @@
#ifndef _CRYPTO_INTERNAL_POLY1305_H
#define _CRYPTO_INTERNAL_POLY1305_H
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/types.h>
#include <crypto/poly1305.h>
diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h
index d2316242a988..be97b97a75dd 100644
--- a/include/crypto/internal/simd.h
+++ b/include/crypto/internal/simd.h
@@ -14,11 +14,10 @@
struct simd_skcipher_alg;
struct skcipher_alg;
-struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
+struct simd_skcipher_alg *simd_skcipher_create_compat(struct skcipher_alg *ialg,
+ const char *algname,
const char *drvname,
const char *basename);
-struct simd_skcipher_alg *simd_skcipher_create(const char *algname,
- const char *basename);
void simd_skcipher_free(struct simd_skcipher_alg *alg);
int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
@@ -32,13 +31,6 @@ void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
struct simd_aead_alg;
struct aead_alg;
-struct simd_aead_alg *simd_aead_create_compat(const char *algname,
- const char *drvname,
- const char *basename);
-struct simd_aead_alg *simd_aead_create(const char *algname,
- const char *basename);
-void simd_aead_free(struct simd_aead_alg *alg);
-
int simd_register_aeads_compat(struct aead_alg *algs, int count,
struct simd_aead_alg **simd_algs);
diff --git a/include/crypto/sha1_base.h b/include/crypto/sha1_base.h
index 2e0e7c3827d1..0c342ed0d038 100644
--- a/include/crypto/sha1_base.h
+++ b/include/crypto/sha1_base.h
@@ -14,7 +14,7 @@
#include <linux/module.h>
#include <linux/string.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
typedef void (sha1_block_fn)(struct sha1_state *sst, u8 const *src, int blocks);
diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h
index ab904d82236f..e0418818d63c 100644
--- a/include/crypto/sha256_base.h
+++ b/include/crypto/sha256_base.h
@@ -9,7 +9,7 @@
#define _CRYPTO_SHA256_BASE_H
#include <asm/byteorder.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
#include <linux/string.h>
diff --git a/include/crypto/sha512_base.h b/include/crypto/sha512_base.h
index b370b3340b16..679916a84cb2 100644
--- a/include/crypto/sha512_base.h
+++ b/include/crypto/sha512_base.h
@@ -14,7 +14,7 @@
#include <linux/module.h>
#include <linux/string.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
typedef void (sha512_block_fn)(struct sha512_state *sst, u8 const *src,
int blocks);
diff --git a/include/crypto/sm3_base.h b/include/crypto/sm3_base.h
index 2f3a32ab97bb..b33ed39c2bce 100644
--- a/include/crypto/sm3_base.h
+++ b/include/crypto/sm3_base.h
@@ -14,7 +14,7 @@
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/string.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
typedef void (sm3_block_fn)(struct sm3_state *sst, u8 const *src, int blocks);
diff --git a/include/crypto/utils.h b/include/crypto/utils.h
index acbb917a00c6..2594f45777b5 100644
--- a/include/crypto/utils.h
+++ b/include/crypto/utils.h
@@ -7,7 +7,7 @@
#ifndef _CRYPTO_UTILS_H
#define _CRYPTO_UTILS_H
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/compiler_attributes.h>
#include <linux/types.h>
diff --git a/include/linux/einj-cxl.h b/include/cxl/einj.h
index 624ff6ff41f9..624ff6ff41f9 100644
--- a/include/linux/einj-cxl.h
+++ b/include/cxl/einj.h
diff --git a/include/linux/cxl-event.h b/include/cxl/event.h
index 0bea1afbd747..0bea1afbd747 100644
--- a/include/linux/cxl-event.h
+++ b/include/cxl/event.h
diff --git a/include/cxl/mailbox.h b/include/cxl/mailbox.h
new file mode 100644
index 000000000000..bacd111e75f1
--- /dev/null
+++ b/include/cxl/mailbox.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2024 Intel Corporation. */
+#ifndef __CXL_MBOX_H__
+#define __CXL_MBOX_H__
+#include <linux/rcuwait.h>
+
+struct cxl_mbox_cmd;
+
+/**
+ * struct cxl_mailbox - context for CXL mailbox operations
+ * @host: device that hosts the mailbox
+ * @payload_size: Size of space for payload
+ * (CXL 3.1 8.2.8.4.3 Mailbox Capabilities Register)
+ * @mbox_mutex: mutex protects device mailbox and firmware
+ * @mbox_wait: rcuwait for mailbox
+ * @mbox_send: @dev specific transport for transmitting mailbox commands
+ */
+struct cxl_mailbox {
+ struct device *host;
+ size_t payload_size;
+ struct mutex mbox_mutex; /* lock to protect mailbox context */
+ struct rcuwait mbox_wait;
+ int (*mbox_send)(struct cxl_mailbox *cxl_mbox, struct cxl_mbox_cmd *cmd);
+};
+
+int cxl_mailbox_init(struct cxl_mailbox *cxl_mbox, struct device *host);
+
+#endif
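
Editor's note: a minimal sketch (assumed, not taken from the patch) of how a driver might wire up the new generic mailbox context — embed struct cxl_mailbox, initialize it against the host device, then supply the transport callback and payload size. All my_* names are hypothetical.

struct my_cxl_dev {
	struct cxl_mailbox cxl_mbox;
	/* device-specific state ... */
};

static int my_mbox_send(struct cxl_mailbox *cxl_mbox, struct cxl_mbox_cmd *cmd)
{
	/* issue @cmd over the device-specific transport */
	return 0;
}

static int my_cxl_setup_mailbox(struct my_cxl_dev *mds, struct device *host)
{
	int rc;

	rc = cxl_mailbox_init(&mds->cxl_mbox, host);
	if (rc)
		return rc;

	mds->cxl_mbox.mbox_send = my_mbox_send;
	mds->cxl_mbox.payload_size = SZ_4K;	/* from the capabilities register */
	return 0;
}
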
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
index 173548c6473a..a6f8b098c56f 100644
--- a/include/drm/display/drm_dp.h
+++ b/include/drm/display/drm_dp.h
@@ -1543,6 +1543,10 @@ enum drm_dp_phy {
#define DP_SYMBOL_ERROR_COUNT_LANE2_PHY_REPEATER1 0xf0039 /* 1.3 */
#define DP_SYMBOL_ERROR_COUNT_LANE3_PHY_REPEATER1 0xf003b /* 1.3 */
+#define DP_OUI_PHY_REPEATER1 0xf003d /* 1.3 */
+#define DP_OUI_PHY_REPEATER(dp_phy) \
+ DP_LTTPR_REG(dp_phy, DP_OUI_PHY_REPEATER1)
+
#define __DP_FEC1_BASE 0xf0290 /* 1.4 */
#define __DP_FEC2_BASE 0xf0298 /* 1.4 */
#define DP_FEC_BASE(dp_phy) \
diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
index ea03e1dd26ba..279624833ea9 100644
--- a/include/drm/display/drm_dp_helper.h
+++ b/include/drm/display/drm_dp_helper.h
@@ -112,6 +112,7 @@ struct drm_dp_vsc_sdp {
* @target_rr: Target Refresh
* @duration_incr_ms: Successive frame duration increase
* @duration_decr_ms: Successive frame duration decrease
+ * @target_rr_divider: Target refresh rate divider
* @mode: Adaptive Sync Operation Mode
*/
struct drm_dp_as_sdp {
@@ -657,6 +658,8 @@ struct drm_dp_desc {
int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
bool is_branch);
+int drm_dp_dump_lttpr_desc(struct drm_dp_aux *aux, enum drm_dp_phy dp_phy);
+
/**
* enum drm_dp_quirk - Display Port sink/branch device specific quirks
*
diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h
index cfe096389d94..f6a1cbb0f600 100644
--- a/include/drm/display/drm_dp_mst_helper.h
+++ b/include/drm/display/drm_dp_mst_helper.h
@@ -244,18 +244,18 @@ struct drm_dp_mst_branch {
bool link_address_sent;
/* global unique identifier to identify branch devices */
- u8 guid[16];
+ guid_t guid;
};
struct drm_dp_nak_reply {
- u8 guid[16];
+ guid_t guid;
u8 reason;
u8 nak_data;
};
struct drm_dp_link_address_ack_reply {
- u8 guid[16];
+ guid_t guid;
u8 nports;
struct drm_dp_link_addr_reply_port {
bool input_port;
@@ -265,7 +265,7 @@ struct drm_dp_link_address_ack_reply {
bool ddps;
bool legacy_device_plug_status;
u8 dpcd_revision;
- u8 peer_guid[16];
+ guid_t peer_guid;
u8 num_sdp_streams;
u8 num_sdp_stream_sinks;
} ports[16];
@@ -348,7 +348,7 @@ struct drm_dp_allocate_payload_ack_reply {
};
struct drm_dp_connection_status_notify {
- u8 guid[16];
+ guid_t guid;
u8 port_number;
bool legacy_device_plug_status;
bool displayport_device_plug_status;
@@ -425,7 +425,7 @@ struct drm_dp_query_payload {
struct drm_dp_resource_status_notify {
u8 port_number;
- u8 guid[16];
+ guid_t guid;
u16 available_pbn;
};
@@ -885,6 +885,8 @@ int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
void drm_dp_mst_dump_topology(struct seq_file *m,
struct drm_dp_mst_topology_mgr *mgr);
+void drm_dp_mst_topology_queue_probe(struct drm_dp_mst_topology_mgr *mgr);
+
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
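
Editor's note: with the GUID fields switched from u8[16] to guid_t, comparisons and imports go through <linux/uuid.h> rather than open-coded memcmp()/memcpy(). A hypothetical helper:

static bool mstb_guid_matches(const struct drm_dp_mst_branch *mstb,
			      const u8 raw[16])
{
	guid_t guid;

	import_guid(&guid, raw);	/* raw DPCD bytes -> guid_t */
	return guid_equal(&mstb->guid, &guid);
}
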
diff --git a/include/drm/drm_accel.h b/include/drm/drm_accel.h
index f4d3784b1dce..038ccb02f9a3 100644
--- a/include/drm/drm_accel.h
+++ b/include/drm/drm_accel.h
@@ -28,7 +28,8 @@
.poll = drm_poll,\
.read = drm_read,\
.llseek = noop_llseek, \
- .mmap = drm_gem_mmap
+ .mmap = drm_gem_mmap, \
+ .fop_flags = FOP_UNSIGNED_OFFSET
/**
* DEFINE_DRM_ACCEL_FOPS() - macro to generate file operations for accelerators drivers
@@ -51,11 +52,10 @@
#if IS_ENABLED(CONFIG_DRM_ACCEL)
+extern struct xarray accel_minors_xa;
+
void accel_core_exit(void);
int accel_core_init(void);
-void accel_minor_remove(int index);
-int accel_minor_alloc(void);
-void accel_minor_replace(struct drm_minor *minor, int index);
void accel_set_device_instance_params(struct device *kdev, int index);
int accel_open(struct inode *inode, struct file *filp);
void accel_debugfs_init(struct drm_device *dev);
@@ -73,19 +73,6 @@ static inline int __init accel_core_init(void)
return 0;
}
-static inline void accel_minor_remove(int index)
-{
-}
-
-static inline int accel_minor_alloc(void)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void accel_minor_replace(struct drm_minor *minor, int index)
-{
-}
-
static inline void accel_set_device_instance_params(struct device *kdev, int index)
{
}
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 4d7f4c5f2001..31ca88deb10d 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -460,7 +460,7 @@ struct drm_atomic_state {
*
* Used for signaling unbound planes/connectors.
* When a connector or plane is not bound to any CRTC, it's still important
- * to preserve linearity to prevent the atomic states from being freed to early.
+ * to preserve linearity to prevent the atomic states from being freed too early.
*
* This commit (if set) is not bound to any CRTC, but will be completed when
* drm_atomic_helper_commit_hw_done() is called.
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index c754651044d4..e3fa43291f44 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -471,14 +471,6 @@ enum drm_privacy_screen_status {
*
* DP definitions come from the DP v2.0 spec
* HDMI definitions come from the CTA-861-H spec
- *
- * A note on YCC and RGB variants:
- *
- * Since userspace is not aware of the encoding on the wire
- * (RGB or YCbCr), drivers are free to pick the appropriate
- * variant, regardless of what userspace selects. E.g., if
- * BT2020_RGB is selected by userspace a driver will pick
- * BT2020_YCC if the encoding on the wire is YUV444 or YUV420.
*
* @DRM_MODE_COLORIMETRY_DEFAULT:
* Driver specific behavior.
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 63767cf24371..c91f87b5242d 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -213,8 +213,9 @@ struct drm_device {
* This can be set to true it the hardware has a working vblank counter
* with high-precision timestamping (otherwise there are races) and the
* driver uses drm_crtc_vblank_on() and drm_crtc_vblank_off()
- * appropriately. See also @max_vblank_count and
- * &drm_crtc_funcs.get_vblank_counter.
+ * appropriately. Also, see @max_vblank_count,
+ * &drm_crtc_funcs.get_vblank_counter and
+ * &drm_vblank_crtc_config.disable_immediate.
*/
bool vblank_disable_immediate;
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index cd37936c3926..02ea4e3248fd 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -229,34 +229,6 @@ struct drm_driver {
void (*postclose) (struct drm_device *, struct drm_file *);
/**
- * @lastclose:
- *
- * Called when the last &struct drm_file has been closed and there's
- * currently no userspace client for the &struct drm_device.
- *
- * Modern drivers should only use this to force-restore the fbdev
- * framebuffer using drm_fb_helper_restore_fbdev_mode_unlocked().
- * Anything else would indicate there's something seriously wrong.
- * Modern drivers can also use this to execute delayed power switching
- * state changes, e.g. in conjunction with the :ref:`vga_switcheroo`
- * infrastructure.
- *
- * This is called after @postclose hook has been called.
- *
- * NOTE:
- *
- * All legacy drivers use this callback to de-initialize the hardware.
- * This is purely because of the shadow-attach model, where the DRM
- * kernel driver does not really own the hardware. Instead ownershipe is
- * handled with the help of userspace through an inheritedly racy dance
- * to set/unset the VT into raw mode.
- *
- * Legacy drivers initialize the hardware in the @firstopen callback,
- * which isn't even called for modern drivers.
- */
- void (*lastclose) (struct drm_device *);
-
- /**
* @unload:
*
* Reverse the effects of the driver load callback. Ideally,
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 6bdfa254a1c1..eaac5e665892 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -440,8 +440,6 @@ int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay);
int drm_edid_header_is_valid(const void *edid);
-bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
- bool *edid_corrupt);
bool drm_edid_is_valid(struct edid *edid);
void drm_edid_get_monitor_name(const struct edid *edid, char *name,
int buflen);
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 375737fd6c36..699f2790b9ac 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -271,9 +271,7 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper);
int drm_fb_helper_debug_enter(struct fb_info *info);
int drm_fb_helper_debug_leave(struct fb_info *info);
-
void drm_fb_helper_lastclose(struct drm_device *dev);
-void drm_fb_helper_output_poll_changed(struct drm_device *dev);
#else
static inline void drm_fb_helper_prepare(struct drm_device *dev,
struct drm_fb_helper *helper,
@@ -401,10 +399,6 @@ static inline int drm_fb_helper_debug_leave(struct fb_info *info)
static inline void drm_fb_helper_lastclose(struct drm_device *dev)
{
}
-
-static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev)
-{
-}
#endif
#endif
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index ab230d3af138..8c0030c77308 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -45,6 +45,8 @@ struct drm_printer;
struct device;
struct file;
+extern struct xarray drm_minors_xa;
+
/*
* FIXME: Not sure we want to have drm_minor here in the end, but to avoid
* header include loops we need it here for now.
@@ -434,6 +436,9 @@ static inline bool drm_is_accel_client(const struct drm_file *file_priv)
void drm_file_update_pid(struct drm_file *);
+struct drm_minor *drm_minor_acquire(struct xarray *minors_xa, unsigned int minor_id);
+void drm_minor_release(struct drm_minor *minor);
+
int drm_open(struct inode *inode, struct file *filp);
int drm_open_helper(struct file *filp, struct drm_minor *minor);
ssize_t drm_read(struct file *filp, char __user *buffer,
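
Editor's note: a sketch of the lookup pattern the newly exported helpers enable (hypothetical caller; it assumes drm_minor_acquire() returns an ERR_PTR on failure, as its in-tree users treat it).

static int example_with_minor(unsigned int minor_id)
{
	struct drm_minor *minor;

	minor = drm_minor_acquire(&drm_minors_xa, minor_id);
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	drm_dbg_core(minor->dev, "found minor %u\n", minor_id);

	drm_minor_release(minor);
	return 0;
}
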
diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
index ef8bc8d72039..1922188f00e8 100644
--- a/include/drm/drm_fixed.h
+++ b/include/drm/drm_fixed.h
@@ -25,8 +25,9 @@
#ifndef DRM_FIXED_H
#define DRM_FIXED_H
-#include <linux/kernel.h>
#include <linux/math64.h>
+#include <linux/types.h>
+#include <linux/wordpart.h>
typedef union dfixed {
u32 full;
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index bae4865b2101..d8b86df2ec0d 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -447,7 +447,8 @@ struct drm_gem_object {
.poll = drm_poll,\
.read = drm_read,\
.llseek = noop_llseek,\
- .mmap = drm_gem_mmap
+ .mmap = drm_gem_mmap, \
+ .fop_flags = FOP_UNSIGNED_OFFSET
/**
* DEFINE_DRM_GEM_FOPS() - macro to generate file operations for GEM drivers
diff --git a/include/drm/drm_gem_dma_helper.h b/include/drm/drm_gem_dma_helper.h
index a827bde494f6..f2678e7ecb98 100644
--- a/include/drm/drm_gem_dma_helper.h
+++ b/include/drm/drm_gem_dma_helper.h
@@ -267,6 +267,7 @@ unsigned long drm_gem_dma_get_unmapped_area(struct file *filp,
.read = drm_read,\
.llseek = noop_llseek,\
.mmap = drm_gem_mmap,\
+ .fop_flags = FOP_UNSIGNED_OFFSET, \
DRM_GEM_DMA_UNMAPPED_AREA_FOPS \
}
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 0f520eeeaa8e..f725f8654611 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -365,6 +365,18 @@ void mipi_dsi_dcs_set_display_off_multi(struct mipi_dsi_multi_context *ctx);
void mipi_dsi_dcs_set_display_on_multi(struct mipi_dsi_multi_context *ctx);
void mipi_dsi_dcs_set_tear_on_multi(struct mipi_dsi_multi_context *ctx,
enum mipi_dsi_dcs_tear_mode mode);
+void mipi_dsi_turn_on_peripheral_multi(struct mipi_dsi_multi_context *ctx);
+void mipi_dsi_dcs_soft_reset_multi(struct mipi_dsi_multi_context *ctx);
+void mipi_dsi_dcs_set_display_brightness_multi(struct mipi_dsi_multi_context *ctx,
+ u16 brightness);
+void mipi_dsi_dcs_set_pixel_format_multi(struct mipi_dsi_multi_context *ctx,
+ u8 format);
+void mipi_dsi_dcs_set_column_address_multi(struct mipi_dsi_multi_context *ctx,
+ u16 start, u16 end);
+void mipi_dsi_dcs_set_page_address_multi(struct mipi_dsi_multi_context *ctx,
+ u16 start, u16 end);
+void mipi_dsi_dcs_set_tear_scanline_multi(struct mipi_dsi_multi_context *ctx,
+ u16 scanline);
/**
* mipi_dsi_generic_write_seq - transmit data using a generic write packet
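
Editor's note: a hypothetical panel power-on path using the new *_multi() helpers; it assumes the struct mipi_dsi_multi_context convention in which errors accumulate in ctx.accum_err, so the individual calls need no per-call error handling.

static int example_panel_on(struct mipi_dsi_device *dsi)
{
	struct mipi_dsi_multi_context ctx = { .dsi = dsi };

	mipi_dsi_dcs_soft_reset_multi(&ctx);
	mipi_dsi_dcs_set_pixel_format_multi(&ctx, MIPI_DCS_PIXEL_FMT_24BIT);
	mipi_dsi_dcs_set_display_brightness_multi(&ctx, 0x01ff);
	mipi_dsi_turn_on_peripheral_multi(&ctx);

	return ctx.accum_err;	/* first error encountered, or 0 */
}
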
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index ab0f167474b1..271765e2e9f2 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -98,22 +98,6 @@ struct drm_mode_config_funcs {
const struct drm_format_info *(*get_format_info)(const struct drm_mode_fb_cmd2 *mode_cmd);
/**
- * @output_poll_changed:
- *
- * Callback used by helpers to inform the driver of output configuration
- * changes.
- *
- * Drivers implementing fbdev emulation use drm_kms_helper_hotplug_event()
- * to call this hook to inform the fbdev helper of output changes.
- *
- * This hook is deprecated, drivers should instead implement fbdev
- * support with struct drm_client, which takes care of any necessary
- * hotplug event forwarding already without further involvement by
- * the driver.
- */
- void (*output_poll_changed)(struct drm_device *dev);
-
- /**
* @mode_valid:
*
* Device specific validation of display modes. Can be used to reject
diff --git a/include/drm/drm_panic.h b/include/drm/drm_panic.h
index 73bb3f3d9ed9..54085d5d05c3 100644
--- a/include/drm/drm_panic.h
+++ b/include/drm/drm_panic.h
@@ -1,4 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 or MIT */
+
+/*
+ * Copyright (c) 2024 Intel
+ * Copyright (c) 2024 Red Hat
+ */
+
#ifndef __DRM_PANIC_H__
#define __DRM_PANIC_H__
@@ -8,9 +14,6 @@
#include <drm/drm_device.h>
#include <drm/drm_fourcc.h>
-/*
- * Copyright (c) 2024 Intel
- */
/**
* struct drm_scanout_buffer - DRM scanout buffer
@@ -146,16 +149,4 @@ struct drm_scanout_buffer {
#define drm_panic_unlock(dev, flags) \
raw_spin_unlock_irqrestore(&(dev)->mode_config.panic_lock, flags)
-#ifdef CONFIG_DRM_PANIC
-
-void drm_panic_register(struct drm_device *dev);
-void drm_panic_unregister(struct drm_device *dev);
-
-#else
-
-static inline void drm_panic_register(struct drm_device *dev) {}
-static inline void drm_panic_unregister(struct drm_device *dev) {}
-
-#endif
-
#endif /* __DRM_PANIC_H__ */
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index 2a1d01e5b56b..fa085c44d4ca 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -69,6 +69,9 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
struct drm_file *file_priv, int prime_fd, uint32_t *handle);
+struct dma_buf *drm_gem_prime_handle_to_dmabuf(struct drm_device *dev,
+ struct drm_file *file_priv, uint32_t handle,
+ uint32_t flags);
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle, uint32_t flags,
int *prime_fd);
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index 5d9dff5149c9..d2676831d765 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -221,7 +221,8 @@ drm_vprintf(struct drm_printer *p, const char *fmt, va_list *va)
/**
* struct drm_print_iterator - local struct used with drm_printer_coredump
- * @data: Pointer to the devcoredump output buffer
+ * @data: Pointer to the devcoredump output buffer, can be NULL if using
+ * drm_printer_coredump to determine size of devcoredump
* @start: The offset within the buffer to start writing
* @remain: The number of bytes to write for this iteration
*/
@@ -266,6 +267,57 @@ struct drm_print_iterator {
* coredump_read, ...)
* }
*
+ * The above example has a time complexity of O(N^2), where N is the size of the
+ * devcoredump. This is acceptable for small devcoredumps but scales poorly for
+ * larger ones.
+ *
+ * Another use case for drm_coredump_printer is to capture the devcoredump into
+ * a saved buffer before the dev_coredump() callback. This involves two passes:
+ * one to determine the size of the devcoredump and another to print it to a
+ * buffer. Then, in dev_coredump(), copy from the saved buffer into the
+ * devcoredump read buffer.
+ *
+ * For example::
+ *
+ * char *devcoredump_saved_buffer;
+ *
+ * ssize_t __coredump_print(char *buffer, ssize_t count, ...)
+ * {
+ * struct drm_print_iterator iter;
+ * struct drm_printer p;
+ *
+ * iter.data = buffer;
+ * iter.start = 0;
+ * iter.remain = count;
+ *
+ * p = drm_coredump_printer(&iter);
+ *
+ * drm_printf(p, "foo=%d\n", foo);
+ * ...
+ * return count - iter.remain;
+ * }
+ *
+ * void coredump_print(...)
+ * {
+ * ssize_t count;
+ *
+ * count = __coredump_print(NULL, INT_MAX, ...);
+ * devcoredump_saved_buffer = kvmalloc(count, GFP_KERNEL);
+ * __coredump_print(devcoredump_saved_buffer, count, ...);
+ * }
+ *
+ * void coredump_read(char *buffer, loff_t offset, size_t count,
+ * void *data, size_t datalen)
+ * {
+ * ...
+ * memcpy(buffer, devcoredump_saved_buffer + offset, count);
+ * ...
+ * }
+ *
+ * The above example has a time complexity of O(2N), i.e. two linear passes
+ * over a devcoredump of size N. This scales better than the previous example
+ * for larger devcoredumps.
+ *
* RETURNS:
* The &drm_printer object
*/
diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h
index 73fcb899a01d..46f09cf68458 100644
--- a/include/drm/drm_rect.h
+++ b/include/drm/drm_rect.h
@@ -238,6 +238,21 @@ static inline void drm_rect_fp_to_int(struct drm_rect *dst,
drm_rect_height(src) >> 16);
}
+/**
+ * drm_rect_overlap - Check if two rectangles overlap
+ * @a: first rectangle
+ * @b: second rectangle
+ *
+ * RETURNS:
+ * %true if the rectangles overlap, %false otherwise.
+ */
+static inline bool drm_rect_overlap(const struct drm_rect *a,
+ const struct drm_rect *b)
+{
+ return (a->x2 > b->x1 && b->x2 > a->x1 &&
+ a->y2 > b->y1 && b->y2 > a->y1);
+}
+
bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip);
bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
const struct drm_rect *clip);
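A minimal sketch of pairing drm_rect_overlap() with drm_rect_intersect() in a
damage-clipping path; struct my_plane and its dst member are hypothetical:

static void repaint_damaged(struct my_plane *p, const struct drm_rect *damage)
{
	struct drm_rect clip = p->dst;

	/* Disjoint rectangles: nothing to repaint for this plane. */
	if (!drm_rect_overlap(&p->dst, damage))
		return;

	/* Shrink the clip to the overlapping region before flushing. */
	drm_rect_intersect(&clip, damage);

	/* ... flush only the pixels inside clip ... */
}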
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index c8f829b4307c..151ab1e85b1b 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -79,6 +79,31 @@ struct drm_pending_vblank_event {
};
/**
+ * struct drm_vblank_crtc_config - vblank configuration for a CRTC
+ */
+struct drm_vblank_crtc_config {
+ /**
+ * @offdelay_ms: Vblank off delay in ms, used to determine how long
+ * &drm_vblank_crtc.disable_timer waits before disabling.
+ *
+ * Defaults to the value of drm_vblank_offdelay in drm_crtc_vblank_on().
+ */
+ int offdelay_ms;
+
+ /**
+ * @disable_immediate: See &drm_device.vblank_disable_immediate
+ * for the exact semantics of immediate vblank disabling.
+ *
+	 * Additionally, this tracks the disable-immediate value per CRTC, so
+	 * that it can differ from the device-wide default where needed.
+ *
+ * Defaults to the value of &drm_device.vblank_disable_immediate in
+ * drm_crtc_vblank_on().
+ */
+ bool disable_immediate;
+};
+
+/**
* struct drm_vblank_crtc - vblank tracking for a CRTC
*
* This structure tracks the vblank state for one CRTC.
@@ -99,8 +124,8 @@ struct drm_vblank_crtc {
wait_queue_head_t queue;
/**
* @disable_timer: Disable timer for the delayed vblank disabling
- * hysteresis logic. Vblank disabling is controlled through the
- * drm_vblank_offdelay module option and the setting of the
+ * hysteresis logic. Vblank disabling is controlled through
+ * &drm_vblank_crtc_config.offdelay_ms and the setting of the
* &drm_device.max_vblank_count value.
*/
struct timer_list disable_timer;
@@ -199,6 +224,12 @@ struct drm_vblank_crtc {
struct drm_display_mode hwmode;
/**
+ * @config: Stores vblank configuration values for a given CRTC.
+ * Also, see drm_crtc_vblank_on_config().
+ */
+ struct drm_vblank_crtc_config config;
+
+ /**
* @enabled: Tracks the enabling state of the corresponding &drm_crtc to
* avoid double-disabling and hence corrupting saved state. Needed by
* drivers not using atomic KMS, since those might go through their CRTC
@@ -247,6 +278,8 @@ void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
void drm_crtc_vblank_off(struct drm_crtc *crtc);
void drm_crtc_vblank_reset(struct drm_crtc *crtc);
+void drm_crtc_vblank_on_config(struct drm_crtc *crtc,
+ const struct drm_vblank_crtc_config *config);
void drm_crtc_vblank_on(struct drm_crtc *crtc);
u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc);
void drm_crtc_vblank_restore(struct drm_crtc *crtc);
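A minimal sketch, assuming a driver wants per-CRTC vblank behaviour instead of
the module-wide defaults; the hook name and the off-delay value are
illustrative:

static void my_crtc_atomic_enable(struct drm_crtc *crtc,
				  struct drm_atomic_state *state)
{
	const struct drm_vblank_crtc_config config = {
		.offdelay_ms = 50,		/* illustrative off delay */
		.disable_immediate = true,
	};

	/* ... program the hardware ... */

	drm_crtc_vblank_on_config(crtc, &config);
}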
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 5acc64954a88..9c437a057e5d 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -574,12 +574,12 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched);
void drm_sched_job_cleanup(struct drm_sched_job *job);
-void drm_sched_wakeup(struct drm_gpu_scheduler *sched, struct drm_sched_entity *entity);
+void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
-void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
+void drm_sched_start(struct drm_gpu_scheduler *sched);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
void drm_sched_reset_karma(struct drm_sched_job *bad);
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
index 6ccf96c91f3a..7b56d1ca36d7 100644
--- a/include/drm/ttm/ttm_bo.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -190,6 +190,41 @@ struct ttm_operation_ctx {
uint64_t bytes_moved;
};
+struct ttm_lru_walk;
+
+/** struct ttm_lru_walk_ops - Operations for an LRU walk. */
+struct ttm_lru_walk_ops {
+ /**
+ * process_bo - Process this bo.
+ * @walk: struct ttm_lru_walk describing the walk.
+ * @bo: A locked and referenced buffer object.
+ *
+	 * Return: Negative error code on error, or a user-defined positive value
+	 * (typically, but not always, the size of the processed bo) on success.
+	 * 0 also indicates success, and -EBUSY means this bo was skipped.
+	 * On success, the returned values are summed by the walk, and the
+	 * walk exits when its target is met.
+ */
+ s64 (*process_bo)(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo);
+};
+
+/**
+ * struct ttm_lru_walk - Structure describing an LRU walk.
+ */
+struct ttm_lru_walk {
+ /** @ops: Pointer to the ops structure. */
+ const struct ttm_lru_walk_ops *ops;
+ /** @ctx: Pointer to the struct ttm_operation_ctx. */
+ struct ttm_operation_ctx *ctx;
+ /** @ticket: The struct ww_acquire_ctx if any. */
+ struct ww_acquire_ctx *ticket;
+ /** @trylock_only: Only use trylock for locking. */
+ bool trylock_only;
+};
+
+s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
+ struct ttm_resource_manager *man, s64 target);
+
/**
* ttm_bo_get - reference a struct ttm_buffer_object
*
@@ -378,15 +413,14 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map);
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map);
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
-int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
- gfp_t gfp_flags);
+s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
+ struct ttm_resource_manager *man, gfp_t gfp_flags,
+ s64 target);
void ttm_bo_pin(struct ttm_buffer_object *bo);
void ttm_bo_unpin(struct ttm_buffer_object *bo);
-int ttm_mem_evict_first(struct ttm_device *bdev,
- struct ttm_resource_manager *man,
- const struct ttm_place *place,
- struct ttm_operation_ctx *ctx,
- struct ww_acquire_ctx *ticket);
+int ttm_bo_evict_first(struct ttm_device *bdev,
+ struct ttm_resource_manager *man,
+ struct ttm_operation_ctx *ctx);
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
struct vm_fault *vmf);
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
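A sketch of wiring up the new LRU walk, under the assumption that process_bo()
moves each buffer object to a driver-provided placement and reports its size so
the walk stops once the target has been reached; my_sys_placement and the
helper names are hypothetical:

static s64 my_process_bo(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
	int ret;

	/* my_sys_placement is a driver-provided system-memory placement. */
	ret = ttm_bo_validate(bo, &my_sys_placement, walk->ctx);

	return ret ? ret : (s64)bo->base.size;
}

static const struct ttm_lru_walk_ops my_walk_ops = {
	.process_bo = my_process_bo,
};

static s64 my_shrink(struct ttm_device *bdev, struct ttm_resource_manager *man,
		     struct ttm_operation_ctx *ctx, s64 target)
{
	struct ttm_lru_walk walk = {
		.ops = &my_walk_ops,
		.ctx = ctx,
		.trylock_only = true,	/* never sleep on bo reservations */
	};

	/* Returns the summed process_bo() values or a negative error code. */
	return ttm_lru_walk_for_evict(&walk, bdev, man, target);
}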
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 69769355139f..be034be56ba1 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -49,6 +49,43 @@ struct io_mapping;
struct sg_table;
struct scatterlist;
+/**
+ * enum ttm_lru_item_type - enumerate ttm_lru_item subclasses
+ */
+enum ttm_lru_item_type {
+ /** @TTM_LRU_RESOURCE: The resource subclass */
+ TTM_LRU_RESOURCE,
+ /** @TTM_LRU_HITCH: The iterator hitch subclass */
+ TTM_LRU_HITCH
+};
+
+/**
+ * struct ttm_lru_item - The TTM lru list node base class
+ * @link: The list link
+ * @type: The subclass type
+ */
+struct ttm_lru_item {
+ struct list_head link;
+ enum ttm_lru_item_type type;
+};
+
+/**
+ * ttm_lru_item_init() - initialize a struct ttm_lru_item
+ * @item: The item to initialize
+ * @type: The subclass type
+ */
+static inline void ttm_lru_item_init(struct ttm_lru_item *item,
+ enum ttm_lru_item_type type)
+{
+ item->type = type;
+ INIT_LIST_HEAD(&item->link);
+}
+
+static inline bool ttm_lru_item_is_res(const struct ttm_lru_item *item)
+{
+ return item->type == TTM_LRU_RESOURCE;
+}
+
struct ttm_resource_manager_func {
/**
* struct ttm_resource_manager_func member alloc
@@ -217,19 +254,20 @@ struct ttm_resource {
/**
* @lru: Least recently used list, see &ttm_resource_manager.lru
*/
- struct list_head lru;
+ struct ttm_lru_item lru;
};
/**
- * struct ttm_resource_cursor
+ * ttm_lru_item_to_res() - Downcast a struct ttm_lru_item to a struct ttm_resource
+ * @item: The struct ttm_lru_item to downcast
*
- * @priority: the current priority
- *
- * Cursor to iterate over the resources in a manager.
+ * Return: Pointer to the embedding struct ttm_resource
*/
-struct ttm_resource_cursor {
- unsigned int priority;
-};
+static inline struct ttm_resource *
+ttm_lru_item_to_res(struct ttm_lru_item *item)
+{
+ return container_of(item, struct ttm_resource, lru);
+}
/**
* struct ttm_lru_bulk_move_pos
@@ -246,8 +284,9 @@ struct ttm_lru_bulk_move_pos {
/**
* struct ttm_lru_bulk_move
- *
 * @pos: first/last lru entry for resources in each domain/priority
+ * @cursor_list: The list of cursors currently traversing any of
+ * the sublists of @pos. Protected by the ttm device's lru_lock.
*
* Container for the current bulk move state. Should be used with
* ttm_lru_bulk_move_init() and ttm_bo_set_bulk_move().
@@ -257,9 +296,38 @@ struct ttm_lru_bulk_move_pos {
*/
struct ttm_lru_bulk_move {
struct ttm_lru_bulk_move_pos pos[TTM_NUM_MEM_TYPES][TTM_MAX_BO_PRIORITY];
+ struct list_head cursor_list;
};
/**
+ * struct ttm_resource_cursor
+ * @man: The resource manager currently being iterated over
+ * @hitch: A hitch list node inserted before the next resource
+ * to iterate over.
+ * @bulk_link: A list link for the list of cursors traversing the
+ * bulk sublist of @bulk. Protected by the ttm device's lru_lock.
+ * @bulk: Pointer to struct ttm_lru_bulk_move whose subrange @hitch is
+ * inserted into. NULL if none. Never dereference this pointer since
+ * the struct ttm_lru_bulk_move object pointed to might have been
+ * freed. The pointer is only for comparison.
+ * @mem_type: The memory type of the LRU list being traversed.
+ * This field is valid iff @bulk != NULL.
+ * @priority: the current priority
+ *
+ * Cursor to iterate over the resources in a manager.
+ */
+struct ttm_resource_cursor {
+ struct ttm_resource_manager *man;
+ struct ttm_lru_item hitch;
+ struct list_head bulk_link;
+ struct ttm_lru_bulk_move *bulk;
+ unsigned int mem_type;
+ unsigned int priority;
+};
+
+void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor);
+
+/**
* struct ttm_kmap_iter_iomap - Specialization for a struct io_mapping +
* struct sg_table backed struct ttm_resource.
* @base: Embedded struct ttm_kmap_iter providing the usage interface.
@@ -347,6 +415,8 @@ ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk);
void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk);
+void ttm_lru_bulk_move_fini(struct ttm_device *bdev,
+ struct ttm_lru_bulk_move *bulk);
void ttm_resource_add_bulk_move(struct ttm_resource *res,
struct ttm_buffer_object *bo);
@@ -389,9 +459,10 @@ struct ttm_resource *
ttm_resource_manager_first(struct ttm_resource_manager *man,
struct ttm_resource_cursor *cursor);
struct ttm_resource *
-ttm_resource_manager_next(struct ttm_resource_manager *man,
- struct ttm_resource_cursor *cursor,
- struct ttm_resource *res);
+ttm_resource_manager_next(struct ttm_resource_cursor *cursor);
+
+struct ttm_resource *
+ttm_lru_first_res_or_null(struct list_head *head);
/**
* ttm_resource_manager_for_each_res - iterate over all resources
@@ -403,7 +474,7 @@ ttm_resource_manager_next(struct ttm_resource_manager *man,
*/
#define ttm_resource_manager_for_each_res(man, cursor, res) \
for (res = ttm_resource_manager_first(man, cursor); res; \
- res = ttm_resource_manager_next(man, cursor, res))
+ res = ttm_resource_manager_next(cursor))
struct ttm_kmap_iter *
ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
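A short sketch of the updated iterator usage: the cursor now carries the
iteration state via its hitch item and should be finalized once the walk is
done. Locking is elided; the usual TTM LRU locking rules apply:

	struct ttm_resource_cursor cursor;
	struct ttm_resource *res;
	u64 bytes = 0;

	ttm_resource_manager_for_each_res(man, &cursor, res)
		bytes += res->size;	/* e.g. account LRU-resident memory */

	ttm_resource_cursor_fini(&cursor);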
diff --git a/include/dt-bindings/arm/qcom,ids.h b/include/dt-bindings/arm/qcom,ids.h
index d6c9e9472121..8332f8d82f96 100644
--- a/include/dt-bindings/arm/qcom,ids.h
+++ b/include/dt-bindings/arm/qcom,ids.h
@@ -234,11 +234,13 @@
#define QCOM_ID_SA8540P 461
#define QCOM_ID_QCM4290 469
#define QCOM_ID_QCS4290 470
+#define QCOM_ID_SM7325 475
#define QCOM_ID_SM8450_2 480
#define QCOM_ID_SM8450_3 482
#define QCOM_ID_SC7280 487
#define QCOM_ID_SC7180P 495
#define QCOM_ID_QCM6490 497
+#define QCOM_ID_SM7325P 499
#define QCOM_ID_IPQ5000 503
#define QCOM_ID_IPQ0509 504
#define QCOM_ID_IPQ0518 505
@@ -274,6 +276,8 @@
#define QCOM_ID_QCM8550 604
#define QCOM_ID_IPQ5300 624
#define QCOM_ID_IPQ5321 650
+#define QCOM_ID_QCS8300 674
+#define QCOM_ID_QCS8275 675
/*
* The board type and revision information, used by Qualcomm bootloaders and
diff --git a/include/dt-bindings/clock/at91.h b/include/dt-bindings/clock/at91.h
index 3e3972a814c1..6ede88c3992d 100644
--- a/include/dt-bindings/clock/at91.h
+++ b/include/dt-bindings/clock/at91.h
@@ -38,6 +38,10 @@
#define PMC_CPU (PMC_MAIN + 9)
#define PMC_MCK1 (PMC_MAIN + 10)
+/* SAM9X7 */
+#define PMC_PLLADIV2 (PMC_MAIN + 11)
+#define PMC_LVDSPLL (PMC_MAIN + 12)
+
#ifndef AT91_PMC_MOSCS
#define AT91_PMC_MOSCS 0 /* MOSCS Flag */
#define AT91_PMC_LOCKA 1 /* PLLA Lock */
diff --git a/include/dt-bindings/clock/axg-audio-clkc.h b/include/dt-bindings/clock/axg-audio-clkc.h
index 08c82c22fa5f..607f23b83fa7 100644
--- a/include/dt-bindings/clock/axg-audio-clkc.h
+++ b/include/dt-bindings/clock/axg-audio-clkc.h
@@ -155,5 +155,12 @@
#define AUD_CLKID_SYSCLK_B_DIV 175
#define AUD_CLKID_SYSCLK_A_EN 176
#define AUD_CLKID_SYSCLK_B_EN 177
+#define AUD_CLKID_EARCRX 178
+#define AUD_CLKID_EARCRX_CMDC_SEL 179
+#define AUD_CLKID_EARCRX_CMDC_DIV 180
+#define AUD_CLKID_EARCRX_CMDC 181
+#define AUD_CLKID_EARCRX_DMAC_SEL 182
+#define AUD_CLKID_EARCRX_DMAC_DIV 183
+#define AUD_CLKID_EARCRX_DMAC 184
#endif /* __AXG_AUDIO_CLKC_BINDINGS_H */
diff --git a/include/dt-bindings/clock/cirrus,ep9301-syscon.h b/include/dt-bindings/clock/cirrus,ep9301-syscon.h
new file mode 100644
index 000000000000..6bb8f532e7d0
--- /dev/null
+++ b/include/dt-bindings/clock/cirrus,ep9301-syscon.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+#ifndef DT_BINDINGS_CIRRUS_EP93XX_CLOCK_H
+#define DT_BINDINGS_CIRRUS_EP93XX_CLOCK_H
+
+#define EP93XX_CLK_PLL1 0
+#define EP93XX_CLK_PLL2 1
+
+#define EP93XX_CLK_FCLK 2
+#define EP93XX_CLK_HCLK 3
+#define EP93XX_CLK_PCLK 4
+
+#define EP93XX_CLK_UART 5
+#define EP93XX_CLK_SPI 6
+#define EP93XX_CLK_PWM 7
+#define EP93XX_CLK_USB 8
+
+#define EP93XX_CLK_M2M0 9
+#define EP93XX_CLK_M2M1 10
+
+#define EP93XX_CLK_M2P0 11
+#define EP93XX_CLK_M2P1 12
+#define EP93XX_CLK_M2P2 13
+#define EP93XX_CLK_M2P3 14
+#define EP93XX_CLK_M2P4 15
+#define EP93XX_CLK_M2P5 16
+#define EP93XX_CLK_M2P6 17
+#define EP93XX_CLK_M2P7 18
+#define EP93XX_CLK_M2P8 19
+#define EP93XX_CLK_M2P9 20
+
+#define EP93XX_CLK_UART1 21
+#define EP93XX_CLK_UART2 22
+#define EP93XX_CLK_UART3 23
+
+#define EP93XX_CLK_ADC 24
+#define EP93XX_CLK_ADC_EN 25
+
+#define EP93XX_CLK_KEYPAD 26
+
+#define EP93XX_CLK_VIDEO 27
+
+#define EP93XX_CLK_I2S_MCLK 28
+#define EP93XX_CLK_I2S_SCLK 29
+#define EP93XX_CLK_I2S_LRCLK 30
+
+#endif /* DT_BINDINGS_CIRRUS_EP93XX_CLOCK_H */
diff --git a/include/dt-bindings/clock/exynos7885.h b/include/dt-bindings/clock/exynos7885.h
index 255e3aa94323..cfede84b46b9 100644
--- a/include/dt-bindings/clock/exynos7885.h
+++ b/include/dt-bindings/clock/exynos7885.h
@@ -69,6 +69,8 @@
#define CLK_GOUT_FSYS_MMC_EMBD 58
#define CLK_GOUT_FSYS_MMC_SDIO 59
#define CLK_GOUT_FSYS_USB30DRD 60
+#define CLK_MOUT_SHARED0_PLL 61
+#define CLK_MOUT_SHARED1_PLL 62
/* CMU_CORE */
#define CLK_MOUT_CORE_BUS_USER 1
@@ -132,16 +134,24 @@
#define CLK_GOUT_WDT1_PCLK 43
/* CMU_FSYS */
-#define CLK_MOUT_FSYS_BUS_USER 1
-#define CLK_MOUT_FSYS_MMC_CARD_USER 2
-#define CLK_MOUT_FSYS_MMC_EMBD_USER 3
-#define CLK_MOUT_FSYS_MMC_SDIO_USER 4
-#define CLK_MOUT_FSYS_USB30DRD_USER 4
-#define CLK_GOUT_MMC_CARD_ACLK 5
-#define CLK_GOUT_MMC_CARD_SDCLKIN 6
-#define CLK_GOUT_MMC_EMBD_ACLK 7
-#define CLK_GOUT_MMC_EMBD_SDCLKIN 8
-#define CLK_GOUT_MMC_SDIO_ACLK 9
-#define CLK_GOUT_MMC_SDIO_SDCLKIN 10
+#define CLK_MOUT_FSYS_BUS_USER 1
+#define CLK_MOUT_FSYS_MMC_CARD_USER 2
+#define CLK_MOUT_FSYS_MMC_EMBD_USER 3
+#define CLK_MOUT_FSYS_MMC_SDIO_USER 4
+#define CLK_GOUT_MMC_CARD_ACLK 5
+#define CLK_GOUT_MMC_CARD_SDCLKIN 6
+#define CLK_GOUT_MMC_EMBD_ACLK 7
+#define CLK_GOUT_MMC_EMBD_SDCLKIN 8
+#define CLK_GOUT_MMC_SDIO_ACLK 9
+#define CLK_GOUT_MMC_SDIO_SDCLKIN 10
+#define CLK_MOUT_FSYS_USB30DRD_USER 11
+#define CLK_MOUT_USB_PLL 12
+#define CLK_FOUT_USB_PLL 13
+#define CLK_FSYS_USB20PHY_CLKCORE 14
+#define CLK_FSYS_USB30DRD_ACLK_20PHYCTRL 15
+#define CLK_FSYS_USB30DRD_ACLK_30PHYCTRL_0 16
+#define CLK_FSYS_USB30DRD_ACLK_30PHYCTRL_1 17
+#define CLK_FSYS_USB30DRD_BUS_CLK_EARLY 18
+#define CLK_FSYS_USB30DRD_REF_CLK 19
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_7885_H */
diff --git a/include/dt-bindings/clock/exynos850.h b/include/dt-bindings/clock/exynos850.h
index 7666241520f8..80dacda57229 100644
--- a/include/dt-bindings/clock/exynos850.h
+++ b/include/dt-bindings/clock/exynos850.h
@@ -358,6 +358,7 @@
#define CLK_GOUT_UART_PCLK 32
#define CLK_GOUT_WDT0_PCLK 33
#define CLK_GOUT_WDT1_PCLK 34
+#define CLK_GOUT_BUSIF_TMU_PCLK 35
/* CMU_CORE */
#define CLK_MOUT_CORE_BUS_USER 1
diff --git a/include/dt-bindings/clock/nxp,imx95-clock.h b/include/dt-bindings/clock/nxp,imx95-clock.h
index 782662c3e740..b7a713a9ac8c 100644
--- a/include/dt-bindings/clock/nxp,imx95-clock.h
+++ b/include/dt-bindings/clock/nxp,imx95-clock.h
@@ -25,4 +25,7 @@
#define IMX95_CLK_DISPMIX_ENG0_SEL 0
#define IMX95_CLK_DISPMIX_ENG1_SEL 1
+#define IMX95_CLK_NETCMIX_ENETC0_RMII 0
+#define IMX95_CLK_NETCMIX_ENETC1_RMII 1
+
#endif /* __DT_BINDINGS_CLOCK_IMX95_H */
diff --git a/include/dt-bindings/clock/px30-cru.h b/include/dt-bindings/clock/px30-cru.h
index 5b1416fcde6f..a2abf1995c34 100644
--- a/include/dt-bindings/clock/px30-cru.h
+++ b/include/dt-bindings/clock/px30-cru.h
@@ -175,8 +175,6 @@
#define PCLK_CIF 352
#define PCLK_OTP_PHY 353
-#define CLK_NR_CLKS (PCLK_OTP_PHY + 1)
-
/* pmu-clocks indices */
#define PLL_GPLL 1
@@ -195,8 +193,6 @@
#define PCLK_GPIO0_PMU 20
#define PCLK_UART0_PMU 21
-#define CLKPMU_NR_CLKS (PCLK_UART0_PMU + 1)
-
/* soft-reset indices */
#define SRST_CORE0_PO 0
#define SRST_CORE1_PO 1
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8998.h b/include/dt-bindings/clock/qcom,gcc-msm8998.h
index b5456a64d421..5b0dde080900 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8998.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8998.h
@@ -193,10 +193,15 @@
#define GCC_MMSS_GPLL0_DIV_CLK 184
#define GCC_GPU_GPLL0_DIV_CLK 185
#define GCC_GPU_GPLL0_CLK 186
+#define HLOS1_VOTE_LPASS_CORE_SMMU_CLK 187
+#define HLOS1_VOTE_LPASS_ADSP_SMMU_CLK 188
+#define GCC_MSS_Q6_BIMC_AXI_CLK 189
#define PCIE_0_GDSC 0
#define UFS_GDSC 1
#define USB_30_GDSC 2
+#define LPASS_ADSP_GDSC 3
+#define LPASS_CORE_GDSC 4
#define GCC_BLSP1_QUP1_BCR 0
#define GCC_BLSP1_QUP2_BCR 1
diff --git a/include/dt-bindings/clock/qcom,gcc-sc8180x.h b/include/dt-bindings/clock/qcom,gcc-sc8180x.h
index 90c6e021a035..e364006aa6ea 100644
--- a/include/dt-bindings/clock/qcom,gcc-sc8180x.h
+++ b/include/dt-bindings/clock/qcom,gcc-sc8180x.h
@@ -248,6 +248,7 @@
#define GCC_USB3_SEC_CLKREF_CLK 238
#define GCC_UFS_MEM_CLKREF_EN 239
#define GCC_UFS_CARD_CLKREF_EN 240
+#define GPLL9 241
#define GCC_EMAC_BCR 0
#define GCC_GPU_BCR 1
@@ -294,6 +295,10 @@
#define GCC_VIDEO_AXI0_CLK_BCR 42
#define GCC_VIDEO_AXI1_CLK_BCR 43
#define GCC_USB3_DP_PHY_SEC_BCR 44
+#define GCC_USB3_UNIPHY_MP0_BCR 45
+#define GCC_USB3_UNIPHY_MP1_BCR 46
+#define GCC_USB3UNIPHY_PHY_MP0_BCR 47
+#define GCC_USB3UNIPHY_PHY_MP1_BCR 48
/* GCC GDSCRs */
#define EMAC_GDSC 0
diff --git a/include/dt-bindings/clock/qcom,sm4450-camcc.h b/include/dt-bindings/clock/qcom,sm4450-camcc.h
new file mode 100644
index 000000000000..bf077951bf1c
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm4450-camcc.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SM4450_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SM4450_H
+
+/* CAM_CC clocks */
+#define CAM_CC_BPS_AHB_CLK 0
+#define CAM_CC_BPS_AREG_CLK 1
+#define CAM_CC_BPS_CLK 2
+#define CAM_CC_BPS_CLK_SRC 3
+#define CAM_CC_CAMNOC_ATB_CLK 4
+#define CAM_CC_CAMNOC_AXI_CLK 5
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 6
+#define CAM_CC_CAMNOC_AXI_HF_CLK 7
+#define CAM_CC_CAMNOC_AXI_SF_CLK 8
+#define CAM_CC_CCI_0_CLK 9
+#define CAM_CC_CCI_0_CLK_SRC 10
+#define CAM_CC_CCI_1_CLK 11
+#define CAM_CC_CCI_1_CLK_SRC 12
+#define CAM_CC_CORE_AHB_CLK 13
+#define CAM_CC_CPAS_AHB_CLK 14
+#define CAM_CC_CPHY_RX_CLK_SRC 15
+#define CAM_CC_CRE_AHB_CLK 16
+#define CAM_CC_CRE_CLK 17
+#define CAM_CC_CRE_CLK_SRC 18
+#define CAM_CC_CSI0PHYTIMER_CLK 19
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 20
+#define CAM_CC_CSI1PHYTIMER_CLK 21
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 22
+#define CAM_CC_CSI2PHYTIMER_CLK 23
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 24
+#define CAM_CC_CSIPHY0_CLK 25
+#define CAM_CC_CSIPHY1_CLK 26
+#define CAM_CC_CSIPHY2_CLK 27
+#define CAM_CC_FAST_AHB_CLK_SRC 28
+#define CAM_CC_ICP_ATB_CLK 29
+#define CAM_CC_ICP_CLK 30
+#define CAM_CC_ICP_CLK_SRC 31
+#define CAM_CC_ICP_CTI_CLK 32
+#define CAM_CC_ICP_TS_CLK 33
+#define CAM_CC_MCLK0_CLK 34
+#define CAM_CC_MCLK0_CLK_SRC 35
+#define CAM_CC_MCLK1_CLK 36
+#define CAM_CC_MCLK1_CLK_SRC 37
+#define CAM_CC_MCLK2_CLK 38
+#define CAM_CC_MCLK2_CLK_SRC 39
+#define CAM_CC_MCLK3_CLK 40
+#define CAM_CC_MCLK3_CLK_SRC 41
+#define CAM_CC_OPE_0_AHB_CLK 42
+#define CAM_CC_OPE_0_AREG_CLK 43
+#define CAM_CC_OPE_0_CLK 44
+#define CAM_CC_OPE_0_CLK_SRC 45
+#define CAM_CC_PLL0 46
+#define CAM_CC_PLL0_OUT_EVEN 47
+#define CAM_CC_PLL0_OUT_ODD 48
+#define CAM_CC_PLL1 49
+#define CAM_CC_PLL1_OUT_EVEN 50
+#define CAM_CC_PLL2 51
+#define CAM_CC_PLL2_OUT_EVEN 52
+#define CAM_CC_PLL3 53
+#define CAM_CC_PLL3_OUT_EVEN 54
+#define CAM_CC_PLL4 55
+#define CAM_CC_PLL4_OUT_EVEN 56
+#define CAM_CC_SLOW_AHB_CLK_SRC 57
+#define CAM_CC_SOC_AHB_CLK 58
+#define CAM_CC_SYS_TMR_CLK 59
+#define CAM_CC_TFE_0_AHB_CLK 60
+#define CAM_CC_TFE_0_CLK 61
+#define CAM_CC_TFE_0_CLK_SRC 62
+#define CAM_CC_TFE_0_CPHY_RX_CLK 63
+#define CAM_CC_TFE_0_CSID_CLK 64
+#define CAM_CC_TFE_0_CSID_CLK_SRC 65
+#define CAM_CC_TFE_1_AHB_CLK 66
+#define CAM_CC_TFE_1_CLK 67
+#define CAM_CC_TFE_1_CLK_SRC 68
+#define CAM_CC_TFE_1_CPHY_RX_CLK 69
+#define CAM_CC_TFE_1_CSID_CLK 70
+#define CAM_CC_TFE_1_CSID_CLK_SRC 71
+
+/* CAM_CC power domains */
+#define CAM_CC_CAMSS_TOP_GDSC 0
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_CAMNOC_BCR 1
+#define CAM_CC_CAMSS_TOP_BCR 2
+#define CAM_CC_CCI_0_BCR 3
+#define CAM_CC_CCI_1_BCR 4
+#define CAM_CC_CPAS_BCR 5
+#define CAM_CC_CRE_BCR 6
+#define CAM_CC_CSI0PHY_BCR 7
+#define CAM_CC_CSI1PHY_BCR 8
+#define CAM_CC_CSI2PHY_BCR 9
+#define CAM_CC_ICP_BCR 10
+#define CAM_CC_MCLK0_BCR 11
+#define CAM_CC_MCLK1_BCR 12
+#define CAM_CC_MCLK2_BCR 13
+#define CAM_CC_MCLK3_BCR 14
+#define CAM_CC_OPE_0_BCR 15
+#define CAM_CC_TFE_0_BCR 16
+#define CAM_CC_TFE_1_BCR 17
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm4450-dispcc.h b/include/dt-bindings/clock/qcom,sm4450-dispcc.h
new file mode 100644
index 000000000000..ca6f2ef90157
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm4450-dispcc.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SM4450_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SM4450_H
+
+/* DISP_CC clocks */
+#define DISP_CC_MDSS_AHB1_CLK 0
+#define DISP_CC_MDSS_AHB_CLK 1
+#define DISP_CC_MDSS_AHB_CLK_SRC 2
+#define DISP_CC_MDSS_BYTE0_CLK 3
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 6
+#define DISP_CC_MDSS_ESC0_CLK 7
+#define DISP_CC_MDSS_ESC0_CLK_SRC 8
+#define DISP_CC_MDSS_MDP1_CLK 9
+#define DISP_CC_MDSS_MDP_CLK 10
+#define DISP_CC_MDSS_MDP_CLK_SRC 11
+#define DISP_CC_MDSS_MDP_LUT1_CLK 12
+#define DISP_CC_MDSS_MDP_LUT_CLK 13
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 14
+#define DISP_CC_MDSS_PCLK0_CLK 15
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 16
+#define DISP_CC_MDSS_ROT1_CLK 17
+#define DISP_CC_MDSS_ROT_CLK 18
+#define DISP_CC_MDSS_ROT_CLK_SRC 19
+#define DISP_CC_MDSS_RSCC_AHB_CLK 20
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 21
+#define DISP_CC_MDSS_VSYNC1_CLK 22
+#define DISP_CC_MDSS_VSYNC_CLK 23
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 24
+#define DISP_CC_PLL0 25
+#define DISP_CC_PLL1 26
+#define DISP_CC_SLEEP_CLK 27
+#define DISP_CC_SLEEP_CLK_SRC 28
+#define DISP_CC_XO_CLK 29
+#define DISP_CC_XO_CLK_SRC 30
+
+/* DISP_CC power domains */
+#define DISP_CC_MDSS_CORE_GDSC 0
+#define DISP_CC_MDSS_CORE_INT2_GDSC 1
+
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_CORE_INT2_BCR 1
+#define DISP_CC_MDSS_RSCC_BCR 2
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm4450-gpucc.h b/include/dt-bindings/clock/qcom,sm4450-gpucc.h
new file mode 100644
index 000000000000..304f83e5f645
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm4450-gpucc.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM4450_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM4450_H
+
+/* GPU_CC clocks */
+#define GPU_CC_AHB_CLK 0
+#define GPU_CC_CB_CLK 1
+#define GPU_CC_CRC_AHB_CLK 2
+#define GPU_CC_CX_FF_CLK 3
+#define GPU_CC_CX_GFX3D_CLK 4
+#define GPU_CC_CX_GFX3D_SLV_CLK 5
+#define GPU_CC_CX_GMU_CLK 6
+#define GPU_CC_CX_SNOC_DVM_CLK 7
+#define GPU_CC_CXO_AON_CLK 8
+#define GPU_CC_CXO_CLK 9
+#define GPU_CC_DEMET_CLK 10
+#define GPU_CC_DEMET_DIV_CLK_SRC 11
+#define GPU_CC_FF_CLK_SRC 12
+#define GPU_CC_FREQ_MEASURE_CLK 13
+#define GPU_CC_GMU_CLK_SRC 14
+#define GPU_CC_GX_CXO_CLK 15
+#define GPU_CC_GX_FF_CLK 16
+#define GPU_CC_GX_GFX3D_CLK 17
+#define GPU_CC_GX_GFX3D_CLK_SRC 18
+#define GPU_CC_GX_GFX3D_RDVM_CLK 19
+#define GPU_CC_GX_GMU_CLK 20
+#define GPU_CC_GX_VSENSE_CLK 21
+#define GPU_CC_HUB_AHB_DIV_CLK_SRC 22
+#define GPU_CC_HUB_AON_CLK 23
+#define GPU_CC_HUB_CLK_SRC 24
+#define GPU_CC_HUB_CX_INT_CLK 25
+#define GPU_CC_HUB_CX_INT_DIV_CLK_SRC 26
+#define GPU_CC_MEMNOC_GFX_CLK 27
+#define GPU_CC_MND1X_0_GFX3D_CLK 28
+#define GPU_CC_PLL0 29
+#define GPU_CC_PLL1 30
+#define GPU_CC_SLEEP_CLK 31
+#define GPU_CC_XO_CLK_SRC 32
+#define GPU_CC_XO_DIV_CLK_SRC 33
+
+/* GPU_CC power domains */
+#define GPU_CC_CX_GDSC 0
+#define GPU_CC_GX_GDSC 1
+
+/* GPU_CC resets */
+#define GPU_CC_ACD_BCR 0
+#define GPU_CC_CB_BCR 1
+#define GPU_CC_CX_BCR 2
+#define GPU_CC_FAST_HUB_BCR 3
+#define GPU_CC_FF_BCR 4
+#define GPU_CC_GFX3D_AON_BCR 5
+#define GPU_CC_GMU_BCR 6
+#define GPU_CC_GX_BCR 7
+#define GPU_CC_XO_BCR 8
+#define GPU_CC_GX_ACD_IROOT_BCR 9
+#define GPU_CC_RBCPR_BCR 10
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8150-camcc.h b/include/dt-bindings/clock/qcom,sm8150-camcc.h
new file mode 100644
index 000000000000..5444035efa93
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8150-camcc.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8150_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8150_H
+
+/* CAM_CC clocks */
+#define CAM_CC_PLL0 0
+#define CAM_CC_PLL0_OUT_EVEN 1
+#define CAM_CC_PLL0_OUT_ODD 2
+#define CAM_CC_PLL1 3
+#define CAM_CC_PLL1_OUT_EVEN 4
+#define CAM_CC_PLL2 5
+#define CAM_CC_PLL2_OUT_MAIN 6
+#define CAM_CC_PLL3 7
+#define CAM_CC_PLL3_OUT_EVEN 8
+#define CAM_CC_PLL4 9
+#define CAM_CC_PLL4_OUT_EVEN 10
+#define CAM_CC_BPS_AHB_CLK 11
+#define CAM_CC_BPS_AREG_CLK 12
+#define CAM_CC_BPS_AXI_CLK 13
+#define CAM_CC_BPS_CLK 14
+#define CAM_CC_BPS_CLK_SRC 15
+#define CAM_CC_CAMNOC_AXI_CLK 16
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 17
+#define CAM_CC_CAMNOC_DCD_XO_CLK 18
+#define CAM_CC_CCI_0_CLK 19
+#define CAM_CC_CCI_0_CLK_SRC 20
+#define CAM_CC_CCI_1_CLK 21
+#define CAM_CC_CCI_1_CLK_SRC 22
+#define CAM_CC_CORE_AHB_CLK 23
+#define CAM_CC_CPAS_AHB_CLK 24
+#define CAM_CC_CPHY_RX_CLK_SRC 25
+#define CAM_CC_CSI0PHYTIMER_CLK 26
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 27
+#define CAM_CC_CSI1PHYTIMER_CLK 28
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 29
+#define CAM_CC_CSI2PHYTIMER_CLK 30
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 31
+#define CAM_CC_CSI3PHYTIMER_CLK 32
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 33
+#define CAM_CC_CSIPHY0_CLK 34
+#define CAM_CC_CSIPHY1_CLK 35
+#define CAM_CC_CSIPHY2_CLK 36
+#define CAM_CC_CSIPHY3_CLK 37
+#define CAM_CC_FAST_AHB_CLK_SRC 38
+#define CAM_CC_FD_CORE_CLK 39
+#define CAM_CC_FD_CORE_CLK_SRC 40
+#define CAM_CC_FD_CORE_UAR_CLK 41
+#define CAM_CC_GDSC_CLK 42
+#define CAM_CC_ICP_AHB_CLK 43
+#define CAM_CC_ICP_CLK 44
+#define CAM_CC_ICP_CLK_SRC 45
+#define CAM_CC_IFE_0_AXI_CLK 46
+#define CAM_CC_IFE_0_CLK 47
+#define CAM_CC_IFE_0_CLK_SRC 48
+#define CAM_CC_IFE_0_CPHY_RX_CLK 49
+#define CAM_CC_IFE_0_CSID_CLK 50
+#define CAM_CC_IFE_0_CSID_CLK_SRC 51
+#define CAM_CC_IFE_0_DSP_CLK 52
+#define CAM_CC_IFE_1_AXI_CLK 53
+#define CAM_CC_IFE_1_CLK 54
+#define CAM_CC_IFE_1_CLK_SRC 55
+#define CAM_CC_IFE_1_CPHY_RX_CLK 56
+#define CAM_CC_IFE_1_CSID_CLK 57
+#define CAM_CC_IFE_1_CSID_CLK_SRC 58
+#define CAM_CC_IFE_1_DSP_CLK 59
+#define CAM_CC_IFE_LITE_0_CLK 60
+#define CAM_CC_IFE_LITE_0_CLK_SRC 61
+#define CAM_CC_IFE_LITE_0_CPHY_RX_CLK 62
+#define CAM_CC_IFE_LITE_0_CSID_CLK 63
+#define CAM_CC_IFE_LITE_0_CSID_CLK_SRC 64
+#define CAM_CC_IFE_LITE_1_CLK 65
+#define CAM_CC_IFE_LITE_1_CLK_SRC 66
+#define CAM_CC_IFE_LITE_1_CPHY_RX_CLK 67
+#define CAM_CC_IFE_LITE_1_CSID_CLK 68
+#define CAM_CC_IFE_LITE_1_CSID_CLK_SRC 69
+#define CAM_CC_IPE_0_AHB_CLK 70
+#define CAM_CC_IPE_0_AREG_CLK 71
+#define CAM_CC_IPE_0_AXI_CLK 72
+#define CAM_CC_IPE_0_CLK 73
+#define CAM_CC_IPE_0_CLK_SRC 74
+#define CAM_CC_IPE_1_AHB_CLK 75
+#define CAM_CC_IPE_1_AREG_CLK 76
+#define CAM_CC_IPE_1_AXI_CLK 77
+#define CAM_CC_IPE_1_CLK 78
+#define CAM_CC_JPEG_CLK 79
+#define CAM_CC_JPEG_CLK_SRC 80
+#define CAM_CC_LRME_CLK 81
+#define CAM_CC_LRME_CLK_SRC 82
+#define CAM_CC_MCLK0_CLK 83
+#define CAM_CC_MCLK0_CLK_SRC 84
+#define CAM_CC_MCLK1_CLK 85
+#define CAM_CC_MCLK1_CLK_SRC 86
+#define CAM_CC_MCLK2_CLK 87
+#define CAM_CC_MCLK2_CLK_SRC 88
+#define CAM_CC_MCLK3_CLK 89
+#define CAM_CC_MCLK3_CLK_SRC 90
+#define CAM_CC_SLOW_AHB_CLK_SRC 91
+
+/* CAM_CC power domains */
+#define TITAN_TOP_GDSC 0
+#define BPS_GDSC 1
+#define IFE_0_GDSC 2
+#define IFE_1_GDSC 3
+#define IPE_0_GDSC 4
+#define IPE_1_GDSC 5
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_CAMNOC_BCR 1
+#define CAM_CC_CCI_BCR 2
+#define CAM_CC_CPAS_BCR 3
+#define CAM_CC_CSI0PHY_BCR 4
+#define CAM_CC_CSI1PHY_BCR 5
+#define CAM_CC_CSI2PHY_BCR 6
+#define CAM_CC_CSI3PHY_BCR 7
+#define CAM_CC_FD_BCR 8
+#define CAM_CC_ICP_BCR 9
+#define CAM_CC_IFE_0_BCR 10
+#define CAM_CC_IFE_1_BCR 11
+#define CAM_CC_IFE_LITE_0_BCR 12
+#define CAM_CC_IFE_LITE_1_BCR 13
+#define CAM_CC_IPE_0_BCR 14
+#define CAM_CC_IPE_1_BCR 15
+#define CAM_CC_JPEG_BCR 16
+#define CAM_CC_LRME_BCR 17
+#define CAM_CC_MCLK0_BCR 18
+#define CAM_CC_MCLK1_BCR 19
+#define CAM_CC_MCLK2_BCR 20
+#define CAM_CC_MCLK3_BCR 21
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8650-dispcc.h b/include/dt-bindings/clock/qcom,sm8650-dispcc.h
index b0a668b395a5..c0a291188f28 100644..120000
--- a/include/dt-bindings/clock/qcom,sm8650-dispcc.h
+++ b/include/dt-bindings/clock/qcom,sm8650-dispcc.h
@@ -1,102 +1 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
-/*
- * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved
- * Copyright (c) 2023, Linaro Ltd.
- */
-
-#ifndef _DT_BINDINGS_CLK_QCOM_SM8650_DISP_CC_H
-#define _DT_BINDINGS_CLK_QCOM_SM8650_DISP_CC_H
-
-/* DISP_CC clocks */
-#define DISP_CC_MDSS_ACCU_CLK 0
-#define DISP_CC_MDSS_AHB1_CLK 1
-#define DISP_CC_MDSS_AHB_CLK 2
-#define DISP_CC_MDSS_AHB_CLK_SRC 3
-#define DISP_CC_MDSS_BYTE0_CLK 4
-#define DISP_CC_MDSS_BYTE0_CLK_SRC 5
-#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 6
-#define DISP_CC_MDSS_BYTE0_INTF_CLK 7
-#define DISP_CC_MDSS_BYTE1_CLK 8
-#define DISP_CC_MDSS_BYTE1_CLK_SRC 9
-#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 10
-#define DISP_CC_MDSS_BYTE1_INTF_CLK 11
-#define DISP_CC_MDSS_DPTX0_AUX_CLK 12
-#define DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 13
-#define DISP_CC_MDSS_DPTX0_CRYPTO_CLK 14
-#define DISP_CC_MDSS_DPTX0_LINK_CLK 15
-#define DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 16
-#define DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 17
-#define DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 18
-#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK 19
-#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 20
-#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK 21
-#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 22
-#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 23
-#define DISP_CC_MDSS_DPTX1_AUX_CLK 24
-#define DISP_CC_MDSS_DPTX1_AUX_CLK_SRC 25
-#define DISP_CC_MDSS_DPTX1_CRYPTO_CLK 26
-#define DISP_CC_MDSS_DPTX1_LINK_CLK 27
-#define DISP_CC_MDSS_DPTX1_LINK_CLK_SRC 28
-#define DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC 29
-#define DISP_CC_MDSS_DPTX1_LINK_INTF_CLK 30
-#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK 31
-#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC 32
-#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK 33
-#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC 34
-#define DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK 35
-#define DISP_CC_MDSS_DPTX2_AUX_CLK 36
-#define DISP_CC_MDSS_DPTX2_AUX_CLK_SRC 37
-#define DISP_CC_MDSS_DPTX2_CRYPTO_CLK 38
-#define DISP_CC_MDSS_DPTX2_LINK_CLK 39
-#define DISP_CC_MDSS_DPTX2_LINK_CLK_SRC 40
-#define DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC 41
-#define DISP_CC_MDSS_DPTX2_LINK_INTF_CLK 42
-#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK 43
-#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC 44
-#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK 45
-#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC 46
-#define DISP_CC_MDSS_DPTX3_AUX_CLK 47
-#define DISP_CC_MDSS_DPTX3_AUX_CLK_SRC 48
-#define DISP_CC_MDSS_DPTX3_CRYPTO_CLK 49
-#define DISP_CC_MDSS_DPTX3_LINK_CLK 50
-#define DISP_CC_MDSS_DPTX3_LINK_CLK_SRC 51
-#define DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC 52
-#define DISP_CC_MDSS_DPTX3_LINK_INTF_CLK 53
-#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK 54
-#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC 55
-#define DISP_CC_MDSS_ESC0_CLK 56
-#define DISP_CC_MDSS_ESC0_CLK_SRC 57
-#define DISP_CC_MDSS_ESC1_CLK 58
-#define DISP_CC_MDSS_ESC1_CLK_SRC 59
-#define DISP_CC_MDSS_MDP1_CLK 60
-#define DISP_CC_MDSS_MDP_CLK 61
-#define DISP_CC_MDSS_MDP_CLK_SRC 62
-#define DISP_CC_MDSS_MDP_LUT1_CLK 63
-#define DISP_CC_MDSS_MDP_LUT_CLK 64
-#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 65
-#define DISP_CC_MDSS_PCLK0_CLK 66
-#define DISP_CC_MDSS_PCLK0_CLK_SRC 67
-#define DISP_CC_MDSS_PCLK1_CLK 68
-#define DISP_CC_MDSS_PCLK1_CLK_SRC 69
-#define DISP_CC_MDSS_RSCC_AHB_CLK 70
-#define DISP_CC_MDSS_RSCC_VSYNC_CLK 71
-#define DISP_CC_MDSS_VSYNC1_CLK 72
-#define DISP_CC_MDSS_VSYNC_CLK 73
-#define DISP_CC_MDSS_VSYNC_CLK_SRC 74
-#define DISP_CC_PLL0 75
-#define DISP_CC_PLL1 76
-#define DISP_CC_SLEEP_CLK 77
-#define DISP_CC_SLEEP_CLK_SRC 78
-#define DISP_CC_XO_CLK 79
-#define DISP_CC_XO_CLK_SRC 80
-
-/* DISP_CC resets */
-#define DISP_CC_MDSS_CORE_BCR 0
-#define DISP_CC_MDSS_CORE_INT2_BCR 1
-#define DISP_CC_MDSS_RSCC_BCR 2
-
-/* DISP_CC GDSCR */
-#define MDSS_GDSC 0
-#define MDSS_INT2_GDSC 1
-
-#endif
+qcom,sm8550-dispcc.h
\ No newline at end of file
diff --git a/include/dt-bindings/clock/renesas,r9a09g057-cpg.h b/include/dt-bindings/clock/renesas,r9a09g057-cpg.h
new file mode 100644
index 000000000000..541e6d719bd6
--- /dev/null
+++ b/include/dt-bindings/clock/renesas,r9a09g057-cpg.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_RENESAS_R9A09G057_CPG_H__
+#define __DT_BINDINGS_CLOCK_RENESAS_R9A09G057_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* Core Clock list */
+#define R9A09G057_SYS_0_PCLK 0
+#define R9A09G057_CA55_0_CORE_CLK0 1
+#define R9A09G057_CA55_0_CORE_CLK1 2
+#define R9A09G057_CA55_0_CORE_CLK2 3
+#define R9A09G057_CA55_0_CORE_CLK3 4
+#define R9A09G057_CA55_0_PERIPHCLK 5
+#define R9A09G057_CM33_CLK0 6
+#define R9A09G057_CST_0_SWCLKTCK 7
+#define R9A09G057_IOTOP_0_SHCLK 8
+
+#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A09G057_CPG_H__ */
diff --git a/include/dt-bindings/clock/rk3036-cru.h b/include/dt-bindings/clock/rk3036-cru.h
index a96a9870ad59..99cc617e1e54 100644
--- a/include/dt-bindings/clock/rk3036-cru.h
+++ b/include/dt-bindings/clock/rk3036-cru.h
@@ -94,8 +94,6 @@
#define HCLK_CPU 477
#define HCLK_PERI 478
-#define CLK_NR_CLKS (HCLK_PERI + 1)
-
/* soft-reset indices */
#define SRST_CORE0 0
#define SRST_CORE1 1
diff --git a/include/dt-bindings/clock/rk3228-cru.h b/include/dt-bindings/clock/rk3228-cru.h
index de550ea56eeb..138b6ce514dd 100644
--- a/include/dt-bindings/clock/rk3228-cru.h
+++ b/include/dt-bindings/clock/rk3228-cru.h
@@ -146,8 +146,6 @@
#define HCLK_S_CRYPTO 477
#define HCLK_PERI 478
-#define CLK_NR_CLKS (HCLK_PERI + 1)
-
/* soft-reset indices */
#define SRST_CORE0_PO 0
#define SRST_CORE1_PO 1
diff --git a/include/dt-bindings/clock/rk3288-cru.h b/include/dt-bindings/clock/rk3288-cru.h
index 33819acbfc56..c6034b01b050 100644
--- a/include/dt-bindings/clock/rk3288-cru.h
+++ b/include/dt-bindings/clock/rk3288-cru.h
@@ -195,8 +195,6 @@
#define HCLK_CPU 477
#define HCLK_PERI 478
-#define CLK_NR_CLKS (HCLK_PERI + 1)
-
/* soft-reset indices */
#define SRST_CORE0 0
#define SRST_CORE1 1
diff --git a/include/dt-bindings/clock/rk3308-cru.h b/include/dt-bindings/clock/rk3308-cru.h
index d97840f9ee2e..ce4cd72b9d3d 100644
--- a/include/dt-bindings/clock/rk3308-cru.h
+++ b/include/dt-bindings/clock/rk3308-cru.h
@@ -212,8 +212,6 @@
#define PCLK_CAN 233
#define PCLK_OWIRE 234
-#define CLK_NR_CLKS (PCLK_OWIRE + 1)
-
/* soft-reset indices */
/* cru_softrst_con0 */
diff --git a/include/dt-bindings/clock/rk3328-cru.h b/include/dt-bindings/clock/rk3328-cru.h
index 555b4ff660ae..8885a2e98c65 100644
--- a/include/dt-bindings/clock/rk3328-cru.h
+++ b/include/dt-bindings/clock/rk3328-cru.h
@@ -201,8 +201,6 @@
#define HCLK_RGA 340
#define HCLK_HDCP 341
-#define CLK_NR_CLKS (HCLK_HDCP + 1)
-
/* soft-reset indices */
#define SRST_CORE0_PO 0
#define SRST_CORE1_PO 1
diff --git a/include/dt-bindings/clock/rk3368-cru.h b/include/dt-bindings/clock/rk3368-cru.h
index 83c72a163fd3..ebae3cbf8192 100644
--- a/include/dt-bindings/clock/rk3368-cru.h
+++ b/include/dt-bindings/clock/rk3368-cru.h
@@ -182,8 +182,6 @@
#define HCLK_BUS 477
#define HCLK_PERI 478
-#define CLK_NR_CLKS (HCLK_PERI + 1)
-
/* soft-reset indices */
#define SRST_CORE_B0 0
#define SRST_CORE_B1 1
diff --git a/include/dt-bindings/clock/rk3399-cru.h b/include/dt-bindings/clock/rk3399-cru.h
index 39169d94a44e..4c90c7703a83 100644
--- a/include/dt-bindings/clock/rk3399-cru.h
+++ b/include/dt-bindings/clock/rk3399-cru.h
@@ -335,8 +335,6 @@
#define HCLK_SDIO_NOC 495
#define HCLK_SDIOAUDIO_NOC 496
-#define CLK_NR_CLKS (HCLK_SDIOAUDIO_NOC + 1)
-
/* pmu-clocks indices */
#define PLL_PPLL 1
@@ -378,8 +376,6 @@
#define PCLK_INTR_ARB_PMU 49
#define HCLK_NOC_PMU 50
-#define CLKPMU_NR_CLKS (HCLK_NOC_PMU + 1)
-
/* soft-reset indices */
/* cru_softrst_con0 */
diff --git a/include/dt-bindings/clock/rockchip,rk3576-cru.h b/include/dt-bindings/clock/rockchip,rk3576-cru.h
new file mode 100644
index 000000000000..25aed298ac2c
--- /dev/null
+++ b/include/dt-bindings/clock/rockchip,rk3576-cru.h
@@ -0,0 +1,592 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2023 Rockchip Electronics Co. Ltd.
+ * Copyright (c) 2024 Collabora Ltd.
+ *
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ * Author: Detlev Casanova <detlev.casanova@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3576_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3576_H
+
+/* cru-clocks indices */
+
+/* cru plls */
+#define PLL_BPLL 0
+#define PLL_LPLL 1
+#define PLL_VPLL 2
+#define PLL_AUPLL 3
+#define PLL_CPLL 4
+#define PLL_GPLL 5
+#define PLL_PPLL 6
+#define ARMCLK_L 7
+#define ARMCLK_B 8
+
+/* cru clocks */
+#define CLK_CPLL_DIV20 9
+#define CLK_CPLL_DIV10 10
+#define CLK_GPLL_DIV8 11
+#define CLK_GPLL_DIV6 12
+#define CLK_CPLL_DIV4 13
+#define CLK_GPLL_DIV4 14
+#define CLK_SPLL_DIV2 15
+#define CLK_GPLL_DIV3 16
+#define CLK_CPLL_DIV2 17
+#define CLK_GPLL_DIV2 18
+#define CLK_SPLL_DIV1 19
+#define PCLK_TOP_ROOT 20
+#define ACLK_TOP 21
+#define HCLK_TOP 22
+#define CLK_AUDIO_FRAC_0 23
+#define CLK_AUDIO_FRAC_1 24
+#define CLK_AUDIO_FRAC_2 25
+#define CLK_AUDIO_FRAC_3 26
+#define CLK_UART_FRAC_0 27
+#define CLK_UART_FRAC_1 28
+#define CLK_UART_FRAC_2 29
+#define CLK_UART1_SRC_TOP 30
+#define CLK_AUDIO_INT_0 31
+#define CLK_AUDIO_INT_1 32
+#define CLK_AUDIO_INT_2 33
+#define CLK_PDM0_SRC_TOP 34
+#define CLK_PDM1_OUT 35
+#define CLK_GMAC0_125M_SRC 36
+#define CLK_GMAC1_125M_SRC 37
+#define LCLK_ASRC_SRC_0 38
+#define LCLK_ASRC_SRC_1 39
+#define REF_CLK0_OUT_PLL 40
+#define REF_CLK1_OUT_PLL 41
+#define REF_CLK2_OUT_PLL 42
+#define REFCLKO25M_GMAC0_OUT 43
+#define REFCLKO25M_GMAC1_OUT 44
+#define CLK_CIFOUT_OUT 45
+#define CLK_GMAC0_RMII_CRU 46
+#define CLK_GMAC1_RMII_CRU 47
+#define CLK_OTPC_AUTO_RD_G 48
+#define CLK_OTP_PHY_G 49
+#define CLK_MIPI_CAMERAOUT_M0 50
+#define CLK_MIPI_CAMERAOUT_M1 51
+#define CLK_MIPI_CAMERAOUT_M2 52
+#define MCLK_PDM0_SRC_TOP 53
+#define HCLK_AUDIO_ROOT 54
+#define HCLK_ASRC_2CH_0 55
+#define HCLK_ASRC_2CH_1 56
+#define HCLK_ASRC_4CH_0 57
+#define HCLK_ASRC_4CH_1 58
+#define CLK_ASRC_2CH_0 59
+#define CLK_ASRC_2CH_1 60
+#define CLK_ASRC_4CH_0 61
+#define CLK_ASRC_4CH_1 62
+#define MCLK_SAI0_8CH_SRC 63
+#define MCLK_SAI0_8CH 64
+#define HCLK_SAI0_8CH 65
+#define HCLK_SPDIF_RX0 66
+#define MCLK_SPDIF_RX0 67
+#define HCLK_SPDIF_RX1 68
+#define MCLK_SPDIF_RX1 69
+#define MCLK_SAI1_8CH_SRC 70
+#define MCLK_SAI1_8CH 71
+#define HCLK_SAI1_8CH 72
+#define MCLK_SAI2_2CH_SRC 73
+#define MCLK_SAI2_2CH 74
+#define HCLK_SAI2_2CH 75
+#define MCLK_SAI3_2CH_SRC 76
+#define MCLK_SAI3_2CH 77
+#define HCLK_SAI3_2CH 78
+#define MCLK_SAI4_2CH_SRC 79
+#define MCLK_SAI4_2CH 80
+#define HCLK_SAI4_2CH 81
+#define HCLK_ACDCDIG_DSM 82
+#define MCLK_ACDCDIG_DSM 83
+#define CLK_PDM1 84
+#define HCLK_PDM1 85
+#define MCLK_PDM1 86
+#define HCLK_SPDIF_TX0 87
+#define MCLK_SPDIF_TX0 88
+#define HCLK_SPDIF_TX1 89
+#define MCLK_SPDIF_TX1 90
+#define CLK_SAI1_MCLKOUT 91
+#define CLK_SAI2_MCLKOUT 92
+#define CLK_SAI3_MCLKOUT 93
+#define CLK_SAI4_MCLKOUT 94
+#define CLK_SAI0_MCLKOUT 95
+#define HCLK_BUS_ROOT 96
+#define PCLK_BUS_ROOT 97
+#define ACLK_BUS_ROOT 98
+#define HCLK_CAN0 99
+#define CLK_CAN0 100
+#define HCLK_CAN1 101
+#define CLK_CAN1 102
+#define CLK_KEY_SHIFT 103
+#define PCLK_I2C1 104
+#define PCLK_I2C2 105
+#define PCLK_I2C3 106
+#define PCLK_I2C4 107
+#define PCLK_I2C5 108
+#define PCLK_I2C6 109
+#define PCLK_I2C7 110
+#define PCLK_I2C8 111
+#define PCLK_I2C9 112
+#define PCLK_WDT_BUSMCU 113
+#define TCLK_WDT_BUSMCU 114
+#define ACLK_GIC 115
+#define CLK_I2C1 116
+#define CLK_I2C2 117
+#define CLK_I2C3 118
+#define CLK_I2C4 119
+#define CLK_I2C5 120
+#define CLK_I2C6 121
+#define CLK_I2C7 122
+#define CLK_I2C8 123
+#define CLK_I2C9 124
+#define PCLK_SARADC 125
+#define CLK_SARADC 126
+#define PCLK_TSADC 127
+#define CLK_TSADC 128
+#define PCLK_UART0 129
+#define PCLK_UART2 130
+#define PCLK_UART3 131
+#define PCLK_UART4 132
+#define PCLK_UART5 133
+#define PCLK_UART6 134
+#define PCLK_UART7 135
+#define PCLK_UART8 136
+#define PCLK_UART9 137
+#define PCLK_UART10 138
+#define PCLK_UART11 139
+#define SCLK_UART0 140
+#define SCLK_UART2 141
+#define SCLK_UART3 142
+#define SCLK_UART4 143
+#define SCLK_UART5 144
+#define SCLK_UART6 145
+#define SCLK_UART7 146
+#define SCLK_UART8 147
+#define SCLK_UART9 148
+#define SCLK_UART10 149
+#define SCLK_UART11 150
+#define PCLK_SPI0 151
+#define PCLK_SPI1 152
+#define PCLK_SPI2 153
+#define PCLK_SPI3 154
+#define PCLK_SPI4 155
+#define CLK_SPI0 156
+#define CLK_SPI1 157
+#define CLK_SPI2 158
+#define CLK_SPI3 159
+#define CLK_SPI4 160
+#define PCLK_WDT0 161
+#define TCLK_WDT0 162
+#define PCLK_PWM1 163
+#define CLK_PWM1 164
+#define CLK_OSC_PWM1 165
+#define CLK_RC_PWM1 166
+#define PCLK_BUSTIMER0 167
+#define PCLK_BUSTIMER1 168
+#define CLK_TIMER0_ROOT 169
+#define CLK_TIMER0 170
+#define CLK_TIMER1 171
+#define CLK_TIMER2 172
+#define CLK_TIMER3 173
+#define CLK_TIMER4 174
+#define CLK_TIMER5 175
+#define PCLK_MAILBOX0 176
+#define PCLK_GPIO1 177
+#define DBCLK_GPIO1 178
+#define PCLK_GPIO2 179
+#define DBCLK_GPIO2 180
+#define PCLK_GPIO3 181
+#define DBCLK_GPIO3 182
+#define PCLK_GPIO4 183
+#define DBCLK_GPIO4 184
+#define ACLK_DECOM 185
+#define PCLK_DECOM 186
+#define DCLK_DECOM 187
+#define CLK_TIMER1_ROOT 188
+#define CLK_TIMER6 189
+#define CLK_TIMER7 190
+#define CLK_TIMER8 191
+#define CLK_TIMER9 192
+#define CLK_TIMER10 193
+#define CLK_TIMER11 194
+#define ACLK_DMAC0 195
+#define ACLK_DMAC1 196
+#define ACLK_DMAC2 197
+#define ACLK_SPINLOCK 198
+#define HCLK_I3C0 199
+#define HCLK_I3C1 200
+#define HCLK_BUS_CM0_ROOT 201
+#define FCLK_BUS_CM0_CORE 202
+#define CLK_BUS_CM0_RTC 203
+#define PCLK_PMU2 204
+#define PCLK_PWM2 205
+#define CLK_PWM2 206
+#define CLK_RC_PWM2 207
+#define CLK_OSC_PWM2 208
+#define CLK_FREQ_PWM1 209
+#define CLK_COUNTER_PWM1 210
+#define SAI_SCLKIN_FREQ 211
+#define SAI_SCLKIN_COUNTER 212
+#define CLK_I3C0 213
+#define CLK_I3C1 214
+#define PCLK_CSIDPHY1 215
+#define PCLK_DDR_ROOT 216
+#define PCLK_DDR_MON_CH0 217
+#define TMCLK_DDR_MON_CH0 218
+#define ACLK_DDR_ROOT 219
+#define HCLK_DDR_ROOT 220
+#define FCLK_DDR_CM0_CORE 221
+#define CLK_DDR_TIMER_ROOT 222
+#define CLK_DDR_TIMER0 223
+#define CLK_DDR_TIMER1 224
+#define TCLK_WDT_DDR 225
+#define PCLK_WDT 226
+#define PCLK_TIMER 227
+#define CLK_DDR_CM0_RTC 228
+#define ACLK_RKNN0 229
+#define ACLK_RKNN1 230
+#define HCLK_RKNN_ROOT 231
+#define CLK_RKNN_DSU0 232
+#define PCLK_NPUTOP_ROOT 233
+#define PCLK_NPU_TIMER 234
+#define CLK_NPUTIMER_ROOT 235
+#define CLK_NPUTIMER0 236
+#define CLK_NPUTIMER1 237
+#define PCLK_NPU_WDT 238
+#define TCLK_NPU_WDT 239
+#define ACLK_RKNN_CBUF 240
+#define HCLK_NPU_CM0_ROOT 241
+#define FCLK_NPU_CM0_CORE 242
+#define CLK_NPU_CM0_RTC 243
+#define HCLK_RKNN_CBUF 244
+#define HCLK_NVM_ROOT 245
+#define ACLK_NVM_ROOT 246
+#define SCLK_FSPI_X2 247
+#define HCLK_FSPI 248
+#define CCLK_SRC_EMMC 249
+#define HCLK_EMMC 250
+#define ACLK_EMMC 251
+#define BCLK_EMMC 252
+#define TCLK_EMMC 253
+#define PCLK_PHP_ROOT 254
+#define ACLK_PHP_ROOT 255
+#define PCLK_PCIE0 256
+#define CLK_PCIE0_AUX 257
+#define ACLK_PCIE0_MST 258
+#define ACLK_PCIE0_SLV 259
+#define ACLK_PCIE0_DBI 260
+#define ACLK_USB3OTG1 261
+#define CLK_REF_USB3OTG1 262
+#define CLK_SUSPEND_USB3OTG1 263
+#define ACLK_MMU0 264
+#define ACLK_SLV_MMU0 265
+#define ACLK_MMU1 266
+#define ACLK_SLV_MMU1 267
+#define PCLK_PCIE1 268
+#define CLK_PCIE1_AUX 269
+#define ACLK_PCIE1_MST 270
+#define ACLK_PCIE1_SLV 271
+#define ACLK_PCIE1_DBI 272
+#define CLK_RXOOB0 273
+#define CLK_RXOOB1 274
+#define CLK_PMALIVE0 275
+#define CLK_PMALIVE1 276
+#define ACLK_SATA0 277
+#define ACLK_SATA1 278
+#define CLK_USB3OTG1_PIPE_PCLK 279
+#define CLK_USB3OTG1_UTMI 280
+#define CLK_USB3OTG0_PIPE_PCLK 281
+#define CLK_USB3OTG0_UTMI 282
+#define HCLK_SDGMAC_ROOT 283
+#define ACLK_SDGMAC_ROOT 284
+#define PCLK_SDGMAC_ROOT 285
+#define ACLK_GMAC0 286
+#define ACLK_GMAC1 287
+#define PCLK_GMAC0 288
+#define PCLK_GMAC1 289
+#define CCLK_SRC_SDIO 290
+#define HCLK_SDIO 291
+#define CLK_GMAC1_PTP_REF 292
+#define CLK_GMAC0_PTP_REF 293
+#define CLK_GMAC1_PTP_REF_SRC 294
+#define CLK_GMAC0_PTP_REF_SRC 295
+#define CCLK_SRC_SDMMC0 296
+#define HCLK_SDMMC0 297
+#define SCLK_FSPI1_X2 298
+#define HCLK_FSPI1 299
+#define ACLK_DSMC_ROOT 300
+#define ACLK_DSMC 301
+#define PCLK_DSMC 302
+#define CLK_DSMC_SYS 303
+#define HCLK_HSGPIO 304
+#define CLK_HSGPIO_TX 305
+#define CLK_HSGPIO_RX 306
+#define ACLK_HSGPIO 307
+#define PCLK_PHPPHY_ROOT 308
+#define PCLK_PCIE2_COMBOPHY0 309
+#define PCLK_PCIE2_COMBOPHY1 310
+#define CLK_PCIE_100M_SRC 311
+#define CLK_PCIE_100M_NDUTY_SRC 312
+#define CLK_REF_PCIE0_PHY 313
+#define CLK_REF_PCIE1_PHY 314
+#define CLK_REF_MPHY_26M 315
+#define HCLK_RKVDEC_ROOT 316
+#define ACLK_RKVDEC_ROOT 317
+#define HCLK_RKVDEC 318
+#define CLK_RKVDEC_HEVC_CA 319
+#define CLK_RKVDEC_CORE 320
+#define ACLK_UFS_ROOT 321
+#define ACLK_USB_ROOT 322
+#define PCLK_USB_ROOT 323
+#define ACLK_USB3OTG0 324
+#define CLK_REF_USB3OTG0 325
+#define CLK_SUSPEND_USB3OTG0 326
+#define ACLK_MMU2 327
+#define ACLK_SLV_MMU2 328
+#define ACLK_UFS_SYS 329
+#define ACLK_VPU_ROOT 330
+#define ACLK_VPU_MID_ROOT 331
+#define HCLK_VPU_ROOT 332
+#define ACLK_JPEG_ROOT 333
+#define ACLK_VPU_LOW_ROOT 334
+#define HCLK_RGA2E_0 335
+#define ACLK_RGA2E_0 336
+#define CLK_CORE_RGA2E_0 337
+#define ACLK_JPEG 338
+#define HCLK_JPEG 339
+#define HCLK_VDPP 340
+#define ACLK_VDPP 341
+#define CLK_CORE_VDPP 342
+#define HCLK_RGA2E_1 343
+#define ACLK_RGA2E_1 344
+#define CLK_CORE_RGA2E_1 345
+#define DCLK_EBC_FRAC_SRC 346
+#define HCLK_EBC 347
+#define ACLK_EBC 348
+#define DCLK_EBC 349
+#define HCLK_VEPU0_ROOT 350
+#define ACLK_VEPU0_ROOT 351
+#define HCLK_VEPU0 352
+#define ACLK_VEPU0 353
+#define CLK_VEPU0_CORE 354
+#define ACLK_VI_ROOT 355
+#define HCLK_VI_ROOT 356
+#define PCLK_VI_ROOT 357
+#define DCLK_VICAP 358
+#define ACLK_VICAP 359
+#define HCLK_VICAP 360
+#define CLK_ISP_CORE 361
+#define CLK_ISP_CORE_MARVIN 362
+#define CLK_ISP_CORE_VICAP 363
+#define ACLK_ISP 364
+#define HCLK_ISP 365
+#define ACLK_VPSS 366
+#define HCLK_VPSS 367
+#define CLK_CORE_VPSS 368
+#define PCLK_CSI_HOST_0 369
+#define PCLK_CSI_HOST_1 370
+#define PCLK_CSI_HOST_2 371
+#define PCLK_CSI_HOST_3 372
+#define PCLK_CSI_HOST_4 373
+#define ICLK_CSIHOST01 374
+#define ICLK_CSIHOST0 375
+#define CLK_ISP_PVTPLL_SRC 376
+#define ACLK_VI_ROOT_INTER 377
+#define CLK_VICAP_I0CLK 378
+#define CLK_VICAP_I1CLK 379
+#define CLK_VICAP_I2CLK 380
+#define CLK_VICAP_I3CLK 381
+#define CLK_VICAP_I4CLK 382
+#define ACLK_VOP_ROOT 383
+#define HCLK_VOP_ROOT 384
+#define PCLK_VOP_ROOT 385
+#define HCLK_VOP 386
+#define ACLK_VOP 387
+#define DCLK_VP0_SRC 388
+#define DCLK_VP1_SRC 389
+#define DCLK_VP2_SRC 390
+#define DCLK_VP0 391
+#define DCLK_VP1 392
+#define DCLK_VP2 393
+#define PCLK_VOPGRF 394
+#define ACLK_VO0_ROOT 395
+#define HCLK_VO0_ROOT 396
+#define PCLK_VO0_ROOT 397
+#define PCLK_VO0_GRF 398
+#define ACLK_HDCP0 399
+#define HCLK_HDCP0 400
+#define PCLK_HDCP0 401
+#define CLK_TRNG0_SKP 402
+#define PCLK_DSIHOST0 403
+#define CLK_DSIHOST0 404
+#define PCLK_HDMITX0 405
+#define CLK_HDMITX0_EARC 406
+#define CLK_HDMITX0_REF 407
+#define PCLK_EDP0 408
+#define CLK_EDP0_24M 409
+#define CLK_EDP0_200M 410
+#define MCLK_SAI5_8CH_SRC 411
+#define MCLK_SAI5_8CH 412
+#define HCLK_SAI5_8CH 413
+#define MCLK_SAI6_8CH_SRC 414
+#define MCLK_SAI6_8CH 415
+#define HCLK_SAI6_8CH 416
+#define HCLK_SPDIF_TX2 417
+#define MCLK_SPDIF_TX2 418
+#define HCLK_SPDIF_RX2 419
+#define MCLK_SPDIF_RX2 420
+#define HCLK_SAI8_8CH 421
+#define MCLK_SAI8_8CH_SRC 422
+#define MCLK_SAI8_8CH 423
+#define ACLK_VO1_ROOT 424
+#define HCLK_VO1_ROOT 425
+#define PCLK_VO1_ROOT 426
+#define MCLK_SAI7_8CH_SRC 427
+#define MCLK_SAI7_8CH 428
+#define HCLK_SAI7_8CH 429
+#define HCLK_SPDIF_TX3 430
+#define HCLK_SPDIF_TX4 431
+#define HCLK_SPDIF_TX5 432
+#define MCLK_SPDIF_TX3 433
+#define CLK_AUX16MHZ_0 434
+#define ACLK_DP0 435
+#define PCLK_DP0 436
+#define PCLK_VO1_GRF 437
+#define ACLK_HDCP1 438
+#define HCLK_HDCP1 439
+#define PCLK_HDCP1 440
+#define CLK_TRNG1_SKP 441
+#define HCLK_SAI9_8CH 442
+#define MCLK_SAI9_8CH_SRC 443
+#define MCLK_SAI9_8CH 444
+#define MCLK_SPDIF_TX4 445
+#define MCLK_SPDIF_TX5 446
+#define CLK_GPU_SRC_PRE 447
+#define CLK_GPU 448
+#define PCLK_GPU_ROOT 449
+#define ACLK_CENTER_ROOT 450
+#define ACLK_CENTER_LOW_ROOT 451
+#define HCLK_CENTER_ROOT 452
+#define PCLK_CENTER_ROOT 453
+#define ACLK_DMA2DDR 454
+#define ACLK_DDR_SHAREMEM 455
+#define PCLK_DMA2DDR 456
+#define PCLK_SHAREMEM 457
+#define HCLK_VEPU1_ROOT 458
+#define ACLK_VEPU1_ROOT 459
+#define HCLK_VEPU1 460
+#define ACLK_VEPU1 461
+#define CLK_VEPU1_CORE 462
+#define CLK_JDBCK_DAP 463
+#define PCLK_MIPI_DCPHY 464
+#define CLK_32K_USB2DEBUG 465
+#define PCLK_CSIDPHY 466
+#define PCLK_USBDPPHY 467
+#define CLK_PMUPHY_REF_SRC 468
+#define CLK_USBDP_COMBO_PHY_IMMORTAL 469
+#define CLK_HDMITXHDP 470
+#define PCLK_MPHY 471
+#define CLK_REF_OSC_MPHY 472
+#define CLK_REF_UFS_CLKOUT 473
+#define HCLK_PMU1_ROOT 474
+#define HCLK_PMU_CM0_ROOT 475
+#define CLK_200M_PMU_SRC 476
+#define CLK_100M_PMU_SRC 477
+#define CLK_50M_PMU_SRC 478
+#define FCLK_PMU_CM0_CORE 479
+#define CLK_PMU_CM0_RTC 480
+#define PCLK_PMU1 481
+#define CLK_PMU1 482
+#define PCLK_PMU1WDT 483
+#define TCLK_PMU1WDT 484
+#define PCLK_PMUTIMER 485
+#define CLK_PMUTIMER_ROOT 486
+#define CLK_PMUTIMER0 487
+#define CLK_PMUTIMER1 488
+#define PCLK_PMU1PWM 489
+#define CLK_PMU1PWM 490
+#define CLK_PMU1PWM_OSC 491
+#define PCLK_PMUPHY_ROOT 492
+#define PCLK_I2C0 493
+#define CLK_I2C0 494
+#define SCLK_UART1 495
+#define PCLK_UART1 496
+#define CLK_PMU1PWM_RC 497
+#define CLK_PDM0 498
+#define HCLK_PDM0 499
+#define MCLK_PDM0 500
+#define HCLK_VAD 501
+#define CLK_OSCCHK_PVTM 502
+#define CLK_PDM0_OUT 503
+#define CLK_HPTIMER_SRC 504
+#define PCLK_PMU0_ROOT 505
+#define PCLK_PMU0 506
+#define PCLK_GPIO0 507
+#define DBCLK_GPIO0 508
+#define CLK_OSC0_PMU1 509
+#define PCLK_PMU1_ROOT 510
+#define XIN_OSC0_DIV 511
+#define ACLK_USB 512
+#define ACLK_UFS 513
+#define ACLK_SDGMAC 514
+#define HCLK_SDGMAC 515
+#define PCLK_SDGMAC 516
+#define HCLK_VO1 517
+#define HCLK_VO0 518
+#define PCLK_CCI_ROOT 519
+#define ACLK_CCI_ROOT 520
+#define HCLK_VO0VOP_CHANNEL 521
+#define ACLK_VO0VOP_CHANNEL 522
+#define ACLK_TOP_MID 523
+#define ACLK_SECURE_HIGH 524
+#define CLK_USBPHY_REF_SRC 525
+#define CLK_PHY_REF_SRC 526
+#define CLK_CPLL_REF_SRC 527
+#define CLK_AUPLL_REF_SRC 528
+#define PCLK_SECURE_NS 529
+#define HCLK_SECURE_NS 530
+#define ACLK_SECURE_NS 531
+#define PCLK_OTPC_NS 532
+#define HCLK_CRYPTO_NS 533
+#define HCLK_TRNG_NS 534
+#define CLK_OTPC_NS 535
+#define SCLK_DSU 536
+#define SCLK_DDR 537
+#define ACLK_CRYPTO_NS 538
+#define CLK_PKA_CRYPTO_NS 539
+#define ACLK_RKVDEC_ROOT_BAK 540
+#define CLK_AUDIO_FRAC_0_SRC 541
+#define CLK_AUDIO_FRAC_1_SRC 542
+#define CLK_AUDIO_FRAC_2_SRC 543
+#define CLK_AUDIO_FRAC_3_SRC 544
+#define PCLK_HDPTX_APB 545
+
+/* secure clk */
+#define CLK_STIMER0_ROOT 546
+#define CLK_STIMER1_ROOT 547
+#define PCLK_SECURE_S 548
+#define HCLK_SECURE_S 549
+#define ACLK_SECURE_S 550
+#define CLK_PKA_CRYPTO_S 551
+#define HCLK_VO1_S 552
+#define PCLK_VO1_S 553
+#define HCLK_VO0_S 554
+#define PCLK_VO0_S 555
+#define PCLK_KLAD 556
+#define HCLK_CRYPTO_S 557
+#define HCLK_KLAD 558
+#define ACLK_CRYPTO_S 559
+#define HCLK_TRNG_S 560
+#define PCLK_OTPC_S 561
+#define CLK_OTPC_S 562
+#define PCLK_WDT_S 563
+#define TCLK_WDT_S 564
+#define PCLK_HDCP0_TRNG 565
+#define PCLK_HDCP1_TRNG 566
+#define HCLK_HDCP_KEY0 567
+#define HCLK_HDCP_KEY1 568
+#define PCLK_EDP_S 569
+#define ACLK_KLAD 570
+
+#endif
diff --git a/include/dt-bindings/clock/samsung,exynosautov9.h b/include/dt-bindings/clock/samsung,exynosautov9.h
index 3065375c2d8b..ce8fb8f7d718 100644
--- a/include/dt-bindings/clock/samsung,exynosautov9.h
+++ b/include/dt-bindings/clock/samsung,exynosautov9.h
@@ -179,6 +179,17 @@
#define CLK_GOUT_CORE_CCI_PCLK 4
#define CLK_GOUT_CORE_CMU_CORE_PCLK 5
+/* CMU_DPUM */
+#define CLK_MOUT_DPUM_BUS_USER 1
+#define CLK_DOUT_DPUM_BUSP 2
+#define CLK_GOUT_DPUM_ACLK_DECON 3
+#define CLK_GOUT_DPUM_ACLK_DMA 4
+#define CLK_GOUT_DPUM_ACLK_DPP 5
+#define CLK_GOUT_DPUM_SYSMMU_D0_CLK 6
+#define CLK_GOUT_DPUM_SYSMMU_D1_CLK 7
+#define CLK_GOUT_DPUM_SYSMMU_D2_CLK 8
+#define CLK_GOUT_DPUM_SYSMMU_D3_CLK 9
+
/* CMU_FSYS0 */
#define CLK_MOUT_FSYS0_BUS_USER 1
#define CLK_MOUT_FSYS0_PCIE_USER 2
diff --git a/include/dt-bindings/clock/samsung,exynosautov920.h b/include/dt-bindings/clock/samsung,exynosautov920.h
new file mode 100644
index 000000000000..c720f344b6bf
--- /dev/null
+++ b/include/dt-bindings/clock/samsung,exynosautov920.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd.
+ * Author: Sunyeal Hong <sunyeal.hong@samsung.com>
+ *
+ * Device Tree binding constants for ExynosAuto v920 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOSAUTOV920_H
+#define _DT_BINDINGS_CLOCK_EXYNOSAUTOV920_H
+
+/* CMU_TOP */
+#define FOUT_SHARED0_PLL 1
+#define FOUT_SHARED1_PLL 2
+#define FOUT_SHARED2_PLL 3
+#define FOUT_SHARED3_PLL 4
+#define FOUT_SHARED4_PLL 5
+#define FOUT_SHARED5_PLL 6
+#define FOUT_MMC_PLL 7
+
+/* MUX in CMU_TOP */
+#define MOUT_SHARED0_PLL 8
+#define MOUT_SHARED1_PLL 9
+#define MOUT_SHARED2_PLL 10
+#define MOUT_SHARED3_PLL 11
+#define MOUT_SHARED4_PLL 12
+#define MOUT_SHARED5_PLL 13
+#define MOUT_MMC_PLL 14
+#define MOUT_CLKCMU_CMU_BOOST 15
+#define MOUT_CLKCMU_CMU_CMUREF 16
+#define MOUT_CLKCMU_ACC_NOC 17
+#define MOUT_CLKCMU_ACC_ORB 18
+#define MOUT_CLKCMU_APM_NOC 19
+#define MOUT_CLKCMU_AUD_CPU 20
+#define MOUT_CLKCMU_AUD_NOC 21
+#define MOUT_CLKCMU_CPUCL0_SWITCH 22
+#define MOUT_CLKCMU_CPUCL0_CLUSTER 23
+#define MOUT_CLKCMU_CPUCL0_DBG 24
+#define MOUT_CLKCMU_CPUCL1_SWITCH 25
+#define MOUT_CLKCMU_CPUCL1_CLUSTER 26
+#define MOUT_CLKCMU_CPUCL2_SWITCH 27
+#define MOUT_CLKCMU_CPUCL2_CLUSTER 28
+#define MOUT_CLKCMU_DNC_NOC 29
+#define MOUT_CLKCMU_DPTX_NOC 30
+#define MOUT_CLKCMU_DPTX_DPGTC 31
+#define MOUT_CLKCMU_DPTX_DPOSC 32
+#define MOUT_CLKCMU_DPUB_NOC 33
+#define MOUT_CLKCMU_DPUB_DSIM 34
+#define MOUT_CLKCMU_DPUF0_NOC 35
+#define MOUT_CLKCMU_DPUF1_NOC 36
+#define MOUT_CLKCMU_DPUF2_NOC 37
+#define MOUT_CLKCMU_DSP_NOC 38
+#define MOUT_CLKCMU_G3D_SWITCH 39
+#define MOUT_CLKCMU_G3D_NOCP 40
+#define MOUT_CLKCMU_GNPU_NOC 41
+#define MOUT_CLKCMU_HSI0_NOC 42
+#define MOUT_CLKCMU_HSI1_NOC 43
+#define MOUT_CLKCMU_HSI1_USBDRD 44
+#define MOUT_CLKCMU_HSI1_MMC_CARD 45
+#define MOUT_CLKCMU_HSI2_NOC 46
+#define MOUT_CLKCMU_HSI2_NOC_UFS 47
+#define MOUT_CLKCMU_HSI2_UFS_EMBD 48
+#define MOUT_CLKCMU_HSI2_ETHERNET 49
+#define MOUT_CLKCMU_ISP_NOC 50
+#define MOUT_CLKCMU_M2M_NOC 51
+#define MOUT_CLKCMU_M2M_JPEG 52
+#define MOUT_CLKCMU_MFC_MFC 53
+#define MOUT_CLKCMU_MFC_WFD 54
+#define MOUT_CLKCMU_MFD_NOC 55
+#define MOUT_CLKCMU_MIF_SWITCH 56
+#define MOUT_CLKCMU_MIF_NOCP 57
+#define MOUT_CLKCMU_MISC_NOC 58
+#define MOUT_CLKCMU_NOCL0_NOC 59
+#define MOUT_CLKCMU_NOCL1_NOC 60
+#define MOUT_CLKCMU_NOCL2_NOC 61
+#define MOUT_CLKCMU_PERIC0_NOC 62
+#define MOUT_CLKCMU_PERIC0_IP 63
+#define MOUT_CLKCMU_PERIC1_NOC 64
+#define MOUT_CLKCMU_PERIC1_IP 65
+#define MOUT_CLKCMU_SDMA_NOC 66
+#define MOUT_CLKCMU_SNW_NOC 67
+#define MOUT_CLKCMU_SSP_NOC 68
+#define MOUT_CLKCMU_TAA_NOC 69
+
+/* DIV in CMU_TOP */
+#define DOUT_SHARED0_DIV1 70
+#define DOUT_SHARED0_DIV2 71
+#define DOUT_SHARED0_DIV3 72
+#define DOUT_SHARED0_DIV4 73
+#define DOUT_SHARED1_DIV1 74
+#define DOUT_SHARED1_DIV2 75
+#define DOUT_SHARED1_DIV3 76
+#define DOUT_SHARED1_DIV4 77
+#define DOUT_SHARED2_DIV1 78
+#define DOUT_SHARED2_DIV2 79
+#define DOUT_SHARED2_DIV3 80
+#define DOUT_SHARED2_DIV4 81
+#define DOUT_SHARED3_DIV1 82
+#define DOUT_SHARED3_DIV2 83
+#define DOUT_SHARED3_DIV3 84
+#define DOUT_SHARED3_DIV4 85
+#define DOUT_SHARED4_DIV1 86
+#define DOUT_SHARED4_DIV2 87
+#define DOUT_SHARED4_DIV3 88
+#define DOUT_SHARED4_DIV4 89
+#define DOUT_SHARED5_DIV1 90
+#define DOUT_SHARED5_DIV2 91
+#define DOUT_SHARED5_DIV3 92
+#define DOUT_SHARED5_DIV4 93
+#define DOUT_CLKCMU_CMU_BOOST 94
+#define DOUT_CLKCMU_ACC_NOC 95
+#define DOUT_CLKCMU_ACC_ORB 96
+#define DOUT_CLKCMU_APM_NOC 97
+#define DOUT_CLKCMU_AUD_CPU 98
+#define DOUT_CLKCMU_AUD_NOC 99
+#define DOUT_CLKCMU_CPUCL0_SWITCH 100
+#define DOUT_CLKCMU_CPUCL0_CLUSTER 101
+#define DOUT_CLKCMU_CPUCL0_DBG 102
+#define DOUT_CLKCMU_CPUCL1_SWITCH 103
+#define DOUT_CLKCMU_CPUCL1_CLUSTER 104
+#define DOUT_CLKCMU_CPUCL2_SWITCH 105
+#define DOUT_CLKCMU_CPUCL2_CLUSTER 106
+#define DOUT_CLKCMU_DNC_NOC 107
+#define DOUT_CLKCMU_DPTX_NOC 108
+#define DOUT_CLKCMU_DPTX_DPGTC 109
+#define DOUT_CLKCMU_DPTX_DPOSC 110
+#define DOUT_CLKCMU_DPUB_NOC 111
+#define DOUT_CLKCMU_DPUB_DSIM 112
+#define DOUT_CLKCMU_DPUF0_NOC 113
+#define DOUT_CLKCMU_DPUF1_NOC 114
+#define DOUT_CLKCMU_DPUF2_NOC 115
+#define DOUT_CLKCMU_DSP_NOC 116
+#define DOUT_CLKCMU_G3D_SWITCH 117
+#define DOUT_CLKCMU_G3D_NOCP 118
+#define DOUT_CLKCMU_GNPU_NOC 119
+#define DOUT_CLKCMU_HSI0_NOC 120
+#define DOUT_CLKCMU_HSI1_NOC 121
+#define DOUT_CLKCMU_HSI1_USBDRD 122
+#define DOUT_CLKCMU_HSI1_MMC_CARD 123
+#define DOUT_CLKCMU_HSI2_NOC 124
+#define DOUT_CLKCMU_HSI2_NOC_UFS 125
+#define DOUT_CLKCMU_HSI2_UFS_EMBD 126
+#define DOUT_CLKCMU_HSI2_ETHERNET 127
+#define DOUT_CLKCMU_ISP_NOC 128
+#define DOUT_CLKCMU_M2M_NOC 129
+#define DOUT_CLKCMU_M2M_JPEG 130
+#define DOUT_CLKCMU_MFC_MFC 131
+#define DOUT_CLKCMU_MFC_WFD 132
+#define DOUT_CLKCMU_MFD_NOC 133
+#define DOUT_CLKCMU_MIF_NOCP 134
+#define DOUT_CLKCMU_MISC_NOC 135
+#define DOUT_CLKCMU_NOCL0_NOC 136
+#define DOUT_CLKCMU_NOCL1_NOC 137
+#define DOUT_CLKCMU_NOCL2_NOC 138
+#define DOUT_CLKCMU_PERIC0_NOC 139
+#define DOUT_CLKCMU_PERIC0_IP 140
+#define DOUT_CLKCMU_PERIC1_NOC 141
+#define DOUT_CLKCMU_PERIC1_IP 142
+#define DOUT_CLKCMU_SDMA_NOC 143
+#define DOUT_CLKCMU_SNW_NOC 144
+#define DOUT_CLKCMU_SSP_NOC 145
+#define DOUT_CLKCMU_TAA_NOC 146
+
+/* CMU_PERIC0 */
+#define CLK_MOUT_PERIC0_IP_USER 1
+#define CLK_MOUT_PERIC0_NOC_USER 2
+#define CLK_MOUT_PERIC0_USI00_USI 3
+#define CLK_MOUT_PERIC0_USI01_USI 4
+#define CLK_MOUT_PERIC0_USI02_USI 5
+#define CLK_MOUT_PERIC0_USI03_USI 6
+#define CLK_MOUT_PERIC0_USI04_USI 7
+#define CLK_MOUT_PERIC0_USI05_USI 8
+#define CLK_MOUT_PERIC0_USI06_USI 9
+#define CLK_MOUT_PERIC0_USI07_USI 10
+#define CLK_MOUT_PERIC0_USI08_USI 11
+#define CLK_MOUT_PERIC0_USI_I2C 12
+#define CLK_MOUT_PERIC0_I3C 13
+
+#define CLK_DOUT_PERIC0_USI00_USI 14
+#define CLK_DOUT_PERIC0_USI01_USI 15
+#define CLK_DOUT_PERIC0_USI02_USI 16
+#define CLK_DOUT_PERIC0_USI03_USI 17
+#define CLK_DOUT_PERIC0_USI04_USI 18
+#define CLK_DOUT_PERIC0_USI05_USI 19
+#define CLK_DOUT_PERIC0_USI06_USI 20
+#define CLK_DOUT_PERIC0_USI07_USI 21
+#define CLK_DOUT_PERIC0_USI08_USI 22
+#define CLK_DOUT_PERIC0_USI_I2C 23
+#define CLK_DOUT_PERIC0_I3C 24
+
+#endif /* _DT_BINDINGS_CLOCK_EXYNOSAUTOV920_H */
diff --git a/include/dt-bindings/iio/adi,ad4695.h b/include/dt-bindings/iio/adi,ad4695.h
new file mode 100644
index 000000000000..9fbef542bf67
--- /dev/null
+++ b/include/dt-bindings/iio/adi,ad4695.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_ADI_AD4695_H
+#define _DT_BINDINGS_ADI_AD4695_H
+
+#define AD4695_COMMON_MODE_REFGND 0xFF
+#define AD4695_COMMON_MODE_COM 0xFE
+
+#endif /* _DT_BINDINGS_ADI_AD4695_H */
diff --git a/include/dt-bindings/interconnect/qcom,ipq5332.h b/include/dt-bindings/interconnect/qcom,ipq5332.h
new file mode 100644
index 000000000000..16475bb07a48
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,ipq5332.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+#ifndef INTERCONNECT_QCOM_IPQ5332_H
+#define INTERCONNECT_QCOM_IPQ5332_H
+
+#define MASTER_SNOC_PCIE3_1_M 0
+#define SLAVE_SNOC_PCIE3_1_M 1
+#define MASTER_ANOC_PCIE3_1_S 2
+#define SLAVE_ANOC_PCIE3_1_S 3
+#define MASTER_SNOC_PCIE3_2_M 4
+#define SLAVE_SNOC_PCIE3_2_M 5
+#define MASTER_ANOC_PCIE3_2_S 6
+#define SLAVE_ANOC_PCIE3_2_S 7
+#define MASTER_SNOC_USB 8
+#define SLAVE_SNOC_USB 9
+#define MASTER_NSSNOC_NSSCC 10
+#define SLAVE_NSSNOC_NSSCC 11
+#define MASTER_NSSNOC_SNOC_0 12
+#define SLAVE_NSSNOC_SNOC_0 13
+#define MASTER_NSSNOC_SNOC_1 14
+#define SLAVE_NSSNOC_SNOC_1 15
+#define MASTER_NSSNOC_ATB 16
+#define SLAVE_NSSNOC_ATB 17
+#define MASTER_NSSNOC_PCNOC_1 18
+#define SLAVE_NSSNOC_PCNOC_1 19
+#define MASTER_NSSNOC_QOSGEN_REF 20
+#define SLAVE_NSSNOC_QOSGEN_REF 21
+#define MASTER_NSSNOC_TIMEOUT_REF 22
+#define SLAVE_NSSNOC_TIMEOUT_REF 23
+#define MASTER_NSSNOC_XO_DCD 24
+#define SLAVE_NSSNOC_XO_DCD 25
+
+#define MASTER_NSSNOC_PPE 0
+#define SLAVE_NSSNOC_PPE 1
+#define MASTER_NSSNOC_PPE_CFG 2
+#define SLAVE_NSSNOC_PPE_CFG 3
+#define MASTER_NSSNOC_NSS_CSR 4
+#define SLAVE_NSSNOC_NSS_CSR 5
+#define MASTER_NSSNOC_CE_APB 6
+#define SLAVE_NSSNOC_CE_APB 7
+#define MASTER_NSSNOC_CE_AXI 8
+#define SLAVE_NSSNOC_CE_AXI 9
+
+#define MASTER_CNOC_AHB 0
+#define SLAVE_CNOC_AHB 1
+
+#endif /* INTERCONNECT_QCOM_IPQ5332_H */
diff --git a/include/dt-bindings/interconnect/qcom,msm8937.h b/include/dt-bindings/interconnect/qcom,msm8937.h
new file mode 100644
index 000000000000..98b8a4637aab
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8937.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Qualcomm MSM8937 interconnect IDs
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8937_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8937_H
+
+/* BIMC fabric */
+#define MAS_APPS_PROC 0
+#define MAS_OXILI 1
+#define MAS_SNOC_BIMC_0 2
+#define MAS_SNOC_BIMC_2 3
+#define MAS_SNOC_BIMC_1 4
+#define MAS_TCU_0 5
+#define SLV_EBI 6
+#define SLV_BIMC_SNOC 7
+
+/* PCNOC fabric */
+#define MAS_SPDM 0
+#define MAS_BLSP_1 1
+#define MAS_BLSP_2 2
+#define MAS_USB_HS1 3
+#define MAS_XI_USB_HS1 4
+#define MAS_CRYPTO 5
+#define MAS_SDCC_1 6
+#define MAS_SDCC_2 7
+#define MAS_SNOC_PCNOC 8
+#define PCNOC_M_0 9
+#define PCNOC_M_1 10
+#define PCNOC_INT_0 11
+#define PCNOC_INT_1 12
+#define PCNOC_INT_2 13
+#define PCNOC_INT_3 14
+#define PCNOC_S_0 15
+#define PCNOC_S_1 16
+#define PCNOC_S_2 17
+#define PCNOC_S_3 18
+#define PCNOC_S_4 19
+#define PCNOC_S_6 20
+#define PCNOC_S_7 21
+#define PCNOC_S_8 22
+#define SLV_SDCC_2 23
+#define SLV_SPDM 24
+#define SLV_PDM 25
+#define SLV_PRNG 26
+#define SLV_TCSR 27
+#define SLV_SNOC_CFG 28
+#define SLV_MESSAGE_RAM 29
+#define SLV_CAMERA_SS_CFG 30
+#define SLV_DISP_SS_CFG 31
+#define SLV_VENUS_CFG 32
+#define SLV_GPU_CFG 33
+#define SLV_TLMM 34
+#define SLV_BLSP_1 35
+#define SLV_BLSP_2 36
+#define SLV_PMIC_ARB 37
+#define SLV_SDCC_1 38
+#define SLV_CRYPTO_0_CFG 39
+#define SLV_USB_HS 40
+#define SLV_TCU 41
+#define SLV_PCNOC_SNOC 42
+
+/* SNOC fabric */
+#define MAS_QDSS_BAM 0
+#define MAS_BIMC_SNOC 1
+#define MAS_PCNOC_SNOC 2
+#define MAS_QDSS_ETR 3
+#define QDSS_INT 4
+#define SNOC_INT_0 5
+#define SNOC_INT_1 6
+#define SNOC_INT_2 7
+#define SLV_KPSS_AHB 8
+#define SLV_WCSS 9
+#define SLV_SNOC_BIMC_1 10
+#define SLV_IMEM 11
+#define SLV_SNOC_PCNOC 12
+#define SLV_QDSS_STM 13
+#define SLV_CATS_1 14
+#define SLV_LPASS 15
+
+/* SNOC-MM fabric */
+#define MAS_JPEG 0
+#define MAS_MDP 1
+#define MAS_VENUS 2
+#define MAS_VFE0 3
+#define MAS_VFE1 4
+#define MAS_CPP 5
+#define SLV_SNOC_BIMC_0 6
+#define SLV_SNOC_BIMC_2 7
+#define SLV_CATS_0 8
+
+#endif /* __DT_BINDINGS_INTERCONNECT_QCOM_MSM8937_H */
diff --git a/include/dt-bindings/interconnect/qcom,msm8976.h b/include/dt-bindings/interconnect/qcom,msm8976.h
new file mode 100644
index 000000000000..4ea90f22320e
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8976.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Qualcomm MSM8976 interconnect IDs
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8976_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8976_H
+
+/* BIMC fabric */
+#define MAS_APPS_PROC 0
+#define MAS_SMMNOC_BIMC 1
+#define MAS_SNOC_BIMC 2
+#define MAS_TCU_0 3
+#define SLV_EBI 4
+#define SLV_BIMC_SNOC 5
+
+/* PCNOC fabric */
+#define MAS_USB_HS2 0
+#define MAS_BLSP_1 1
+#define MAS_USB_HS1 2
+#define MAS_BLSP_2 3
+#define MAS_CRYPTO 4
+#define MAS_SDCC_1 5
+#define MAS_SDCC_2 6
+#define MAS_SDCC_3 7
+#define MAS_SNOC_PCNOC 8
+#define MAS_LPASS_AHB 9
+#define MAS_SPDM 10
+#define MAS_DEHR 11
+#define MAS_XM_USB_HS1 12
+#define PCNOC_M_0 13
+#define PCNOC_M_1 14
+#define PCNOC_INT_0 15
+#define PCNOC_INT_1 16
+#define PCNOC_INT_2 17
+#define PCNOC_S_1 18
+#define PCNOC_S_2 19
+#define PCNOC_S_3 20
+#define PCNOC_S_4 21
+#define PCNOC_S_8 22
+#define PCNOC_S_9 23
+#define SLV_TCSR 24
+#define SLV_TLMM 25
+#define SLV_CRYPTO_0_CFG 26
+#define SLV_MESSAGE_RAM 27
+#define SLV_PDM 28
+#define SLV_PRNG 29
+#define SLV_PMIC_ARB 30
+#define SLV_SNOC_CFG 31
+#define SLV_DCC_CFG 32
+#define SLV_CAMERA_SS_CFG 33
+#define SLV_DISP_SS_CFG 34
+#define SLV_VENUS_CFG 35
+#define SLV_SDCC_1 36
+#define SLV_BLSP_1 37
+#define SLV_USB_HS 38
+#define SLV_SDCC_3 39
+#define SLV_SDCC_2 40
+#define SLV_GPU_CFG 41
+#define SLV_USB_HS2 42
+#define SLV_BLSP_2 43
+#define SLV_PCNOC_SNOC 44
+
+/* SNOC fabric */
+#define MAS_QDSS_BAM 0
+#define MAS_BIMC_SNOC 1
+#define MAS_PCNOC_SNOC 2
+#define MAS_QDSS_ETR 3
+#define MAS_LPASS_PROC 4
+#define MAS_IPA 5
+#define QDSS_INT 6
+#define SNOC_INT_0 7
+#define SNOC_INT_1 8
+#define SNOC_INT_2 9
+#define SLV_KPSS_AHB 10
+#define SLV_SNOC_BIMC 11
+#define SLV_IMEM 12
+#define SLV_SNOC_PCNOC 13
+#define SLV_QDSS_STM 14
+#define SLV_CATS_0 15
+#define SLV_CATS_1 16
+#define SLV_LPASS 17
+
+/* SNOC-MM fabric */
+#define MAS_JPEG 0
+#define MAS_OXILI 1
+#define MAS_MDP0 2
+#define MAS_MDP1 3
+#define MAS_VENUS_0 4
+#define MAS_VENUS_1 5
+#define MAS_VFE_0 6
+#define MAS_VFE_1 7
+#define MAS_CPP 8
+#define MM_INT_0 9
+#define SLV_SMMNOC_BIMC 10
+
+#endif /* __DT_BINDINGS_INTERCONNECT_QCOM_MSM8976_H */
diff --git a/include/dt-bindings/interconnect/qcom,sm8350.h b/include/dt-bindings/interconnect/qcom,sm8350.h
index c7f7ed315aeb..2282f93607bc 100644
--- a/include/dt-bindings/interconnect/qcom,sm8350.h
+++ b/include/dt-bindings/interconnect/qcom,sm8350.h
@@ -119,9 +119,6 @@
#define SLAVE_SERVICE_GEM_NOC_1 16
#define SLAVE_SERVICE_GEM_NOC_2 17
#define SLAVE_SERVICE_GEM_NOC 18
-#define MASTER_MNOC_HF_MEM_NOC_DISP 19
-#define MASTER_MNOC_SF_MEM_NOC_DISP 20
-#define SLAVE_LLCC_DISP 21
#define MASTER_CNOC_LPASS_AG_NOC 0
#define SLAVE_LPASS_CORE_CFG 1
@@ -133,8 +130,6 @@
#define MASTER_LLCC 0
#define SLAVE_EBI1 1
-#define MASTER_LLCC_DISP 2
-#define SLAVE_EBI1_DISP 3
#define MASTER_CAMNOC_HF 0
#define MASTER_CAMNOC_ICP 1
@@ -149,11 +144,6 @@
#define SLAVE_MNOC_HF_MEM_NOC 10
#define SLAVE_MNOC_SF_MEM_NOC 11
#define SLAVE_SERVICE_MNOC 12
-#define MASTER_MDP0_DISP 13
-#define MASTER_MDP1_DISP 14
-#define MASTER_ROTATOR_DISP 15
-#define SLAVE_MNOC_HF_MEM_NOC_DISP 16
-#define SLAVE_MNOC_SF_MEM_NOC_DISP 17
#define MASTER_CDSP_NOC_CFG 0
#define MASTER_CDSP_PROC 1
diff --git a/include/dt-bindings/interrupt-controller/arm-gic.h b/include/dt-bindings/interrupt-controller/arm-gic.h
index 35b6f69b7db6..887f53363e8a 100644
--- a/include/dt-bindings/interrupt-controller/arm-gic.h
+++ b/include/dt-bindings/interrupt-controller/arm-gic.h
@@ -12,6 +12,8 @@
#define GIC_SPI 0
#define GIC_PPI 1
+#define GIC_ESPI 2
+#define GIC_EPPI 3
/*
* Interrupt specifier cell 2.
diff --git a/include/dt-bindings/mailbox/qcom-ipcc.h b/include/dt-bindings/mailbox/qcom-ipcc.h
index fbfa3febc66d..fd85a79381b3 100644
--- a/include/dt-bindings/mailbox/qcom-ipcc.h
+++ b/include/dt-bindings/mailbox/qcom-ipcc.h
@@ -33,5 +33,7 @@
#define IPCC_CLIENT_NSP1 18
#define IPCC_CLIENT_TME 23
#define IPCC_CLIENT_WPSS 24
+#define IPCC_CLIENT_GPDSP0 31
+#define IPCC_CLIENT_GPDSP1 32
#endif
diff --git a/include/dt-bindings/pinctrl/pinctrl-cv1800b.h b/include/dt-bindings/pinctrl/pinctrl-cv1800b.h
new file mode 100644
index 000000000000..0593fc33d470
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-cv1800b.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * This file is generated from the vendor pinout definition.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_CV1800B_H
+#define _DT_BINDINGS_PINCTRL_CV1800B_H
+
+#include <dt-bindings/pinctrl/pinctrl-cv18xx.h>
+
+#define PIN_AUD_AOUTR 1
+#define PIN_SD0_CLK 3
+#define PIN_SD0_CMD 4
+#define PIN_SD0_D0 5
+#define PIN_SD0_D1 7
+#define PIN_SD0_D2 8
+#define PIN_SD0_D3 9
+#define PIN_SD0_CD 11
+#define PIN_SD0_PWR_EN 12
+#define PIN_SPK_EN 14
+#define PIN_UART0_TX 15
+#define PIN_UART0_RX 16
+#define PIN_SPINOR_HOLD_X 17
+#define PIN_SPINOR_SCK 18
+#define PIN_SPINOR_MOSI 19
+#define PIN_SPINOR_WP_X 20
+#define PIN_SPINOR_MISO 21
+#define PIN_SPINOR_CS_X 22
+#define PIN_IIC0_SCL 23
+#define PIN_IIC0_SDA 24
+#define PIN_AUX0 25
+#define PIN_PWR_VBAT_DET 30
+#define PIN_PWR_SEQ2 31
+#define PIN_XTAL_XIN 33
+#define PIN_SD1_GPIO0 35
+#define PIN_SD1_GPIO1 36
+#define PIN_SD1_D3 38
+#define PIN_SD1_D2 39
+#define PIN_SD1_D1 40
+#define PIN_SD1_D0 41
+#define PIN_SD1_CMD 42
+#define PIN_SD1_CLK 43
+#define PIN_ADC1 44
+#define PIN_USB_VBUS_DET 45
+#define PIN_ETH_TXP 47
+#define PIN_ETH_TXM 48
+#define PIN_ETH_RXP 49
+#define PIN_ETH_RXM 50
+#define PIN_MIPIRX4N 56
+#define PIN_MIPIRX4P 57
+#define PIN_MIPIRX3N 58
+#define PIN_MIPIRX3P 59
+#define PIN_MIPIRX2N 60
+#define PIN_MIPIRX2P 61
+#define PIN_MIPIRX1N 62
+#define PIN_MIPIRX1P 63
+#define PIN_MIPIRX0N 64
+#define PIN_MIPIRX0P 65
+#define PIN_AUD_AINL_MIC 67
+
+#endif /* _DT_BINDINGS_PINCTRL_CV1800B_H */
diff --git a/include/dt-bindings/pinctrl/pinctrl-cv1812h.h b/include/dt-bindings/pinctrl/pinctrl-cv1812h.h
new file mode 100644
index 000000000000..2908de347919
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-cv1812h.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * This file is generated from the vendor pinout definition.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_CV1812H_H
+#define _DT_BINDINGS_PINCTRL_CV1812H_H
+
+#include <dt-bindings/pinctrl/pinctrl-cv18xx.h>
+
+#define PINPOS(row, col) \
+ ((((row) - 'A' + 1) << 8) + ((col) - 1))
+
+#define PIN_MIPI_TXM4 PINPOS('A', 2)
+#define PIN_MIPIRX0N PINPOS('A', 4)
+#define PIN_MIPIRX3P PINPOS('A', 6)
+#define PIN_MIPIRX4P PINPOS('A', 7)
+#define PIN_VIVO_D2 PINPOS('A', 9)
+#define PIN_VIVO_D3 PINPOS('A', 10)
+#define PIN_VIVO_D10 PINPOS('A', 12)
+#define PIN_USB_VBUS_DET PINPOS('A', 13)
+#define PIN_MIPI_TXP3 PINPOS('B', 1)
+#define PIN_MIPI_TXM3 PINPOS('B', 2)
+#define PIN_MIPI_TXP4 PINPOS('B', 3)
+#define PIN_MIPIRX0P PINPOS('B', 4)
+#define PIN_MIPIRX1N PINPOS('B', 5)
+#define PIN_MIPIRX2N PINPOS('B', 6)
+#define PIN_MIPIRX4N PINPOS('B', 7)
+#define PIN_MIPIRX5N PINPOS('B', 8)
+#define PIN_VIVO_D1 PINPOS('B', 9)
+#define PIN_VIVO_D5 PINPOS('B', 10)
+#define PIN_VIVO_D7 PINPOS('B', 11)
+#define PIN_VIVO_D9 PINPOS('B', 12)
+#define PIN_USB_ID PINPOS('B', 13)
+#define PIN_ETH_RXM PINPOS('B', 15)
+#define PIN_MIPI_TXP2 PINPOS('C', 1)
+#define PIN_MIPI_TXM2 PINPOS('C', 2)
+#define PIN_CAM_PD0 PINPOS('C', 3)
+#define PIN_CAM_MCLK0 PINPOS('C', 4)
+#define PIN_MIPIRX1P PINPOS('C', 5)
+#define PIN_MIPIRX2P PINPOS('C', 6)
+#define PIN_MIPIRX3N PINPOS('C', 7)
+#define PIN_MIPIRX5P PINPOS('C', 8)
+#define PIN_VIVO_CLK PINPOS('C', 9)
+#define PIN_VIVO_D6 PINPOS('C', 10)
+#define PIN_VIVO_D8 PINPOS('C', 11)
+#define PIN_USB_VBUS_EN PINPOS('C', 12)
+#define PIN_ETH_RXP PINPOS('C', 14)
+#define PIN_GPIO_RTX PINPOS('C', 15)
+#define PIN_MIPI_TXP1 PINPOS('D', 1)
+#define PIN_MIPI_TXM1 PINPOS('D', 2)
+#define PIN_CAM_MCLK1 PINPOS('D', 3)
+#define PIN_IIC3_SCL PINPOS('D', 4)
+#define PIN_VIVO_D4 PINPOS('D', 10)
+#define PIN_ETH_TXM PINPOS('D', 14)
+#define PIN_ETH_TXP PINPOS('D', 15)
+#define PIN_MIPI_TXP0 PINPOS('E', 1)
+#define PIN_MIPI_TXM0 PINPOS('E', 2)
+#define PIN_CAM_PD1 PINPOS('E', 4)
+#define PIN_CAM_RST0 PINPOS('E', 5)
+#define PIN_VIVO_D0 PINPOS('E', 10)
+#define PIN_ADC1 PINPOS('E', 13)
+#define PIN_ADC2 PINPOS('E', 14)
+#define PIN_ADC3 PINPOS('E', 15)
+#define PIN_AUD_AOUTL PINPOS('F', 2)
+#define PIN_IIC3_SDA PINPOS('F', 4)
+#define PIN_SD1_D2 PINPOS('F', 14)
+#define PIN_AUD_AOUTR PINPOS('G', 2)
+#define PIN_SD1_D3 PINPOS('G', 13)
+#define PIN_SD1_CLK PINPOS('G', 14)
+#define PIN_SD1_CMD PINPOS('G', 15)
+#define PIN_AUD_AINL_MIC PINPOS('H', 1)
+#define PIN_RSTN PINPOS('H', 12)
+#define PIN_PWM0_BUCK PINPOS('H', 13)
+#define PIN_SD1_D1 PINPOS('H', 14)
+#define PIN_SD1_D0 PINPOS('H', 15)
+#define PIN_AUD_AINR_MIC PINPOS('J', 1)
+#define PIN_IIC2_SCL PINPOS('J', 13)
+#define PIN_IIC2_SDA PINPOS('J', 14)
+#define PIN_SD0_CD PINPOS('K', 2)
+#define PIN_SD0_D1 PINPOS('K', 3)
+#define PIN_UART2_RX PINPOS('K', 13)
+#define PIN_UART2_CTS PINPOS('K', 14)
+#define PIN_UART2_TX PINPOS('K', 15)
+#define PIN_SD0_CLK PINPOS('L', 1)
+#define PIN_SD0_D0 PINPOS('L', 2)
+#define PIN_SD0_CMD PINPOS('L', 3)
+#define PIN_CLK32K PINPOS('L', 14)
+#define PIN_UART2_RTS PINPOS('L', 15)
+#define PIN_SD0_D3 PINPOS('M', 1)
+#define PIN_SD0_D2 PINPOS('M', 2)
+#define PIN_UART0_RX PINPOS('M', 4)
+#define PIN_UART0_TX PINPOS('M', 5)
+#define PIN_JTAG_CPU_TRST PINPOS('M', 6)
+#define PIN_PWR_ON PINPOS('M', 11)
+#define PIN_PWR_GPIO2 PINPOS('M', 12)
+#define PIN_PWR_GPIO0 PINPOS('M', 13)
+#define PIN_CLK25M PINPOS('M', 14)
+#define PIN_SD0_PWR_EN PINPOS('N', 1)
+#define PIN_SPK_EN PINPOS('N', 3)
+#define PIN_JTAG_CPU_TCK PINPOS('N', 4)
+#define PIN_JTAG_CPU_TMS PINPOS('N', 6)
+#define PIN_PWR_WAKEUP1 PINPOS('N', 11)
+#define PIN_PWR_WAKEUP0 PINPOS('N', 12)
+#define PIN_PWR_GPIO1 PINPOS('N', 13)
+#define PIN_EMMC_DAT3 PINPOS('P', 1)
+#define PIN_EMMC_DAT0 PINPOS('P', 2)
+#define PIN_EMMC_DAT2 PINPOS('P', 3)
+#define PIN_EMMC_RSTN PINPOS('P', 4)
+#define PIN_AUX0 PINPOS('P', 5)
+#define PIN_IIC0_SDA PINPOS('P', 6)
+#define PIN_PWR_SEQ3 PINPOS('P', 10)
+#define PIN_PWR_VBAT_DET PINPOS('P', 11)
+#define PIN_PWR_SEQ1 PINPOS('P', 12)
+#define PIN_PWR_BUTTON1 PINPOS('P', 13)
+#define PIN_EMMC_DAT1 PINPOS('R', 2)
+#define PIN_EMMC_CMD PINPOS('R', 3)
+#define PIN_EMMC_CLK PINPOS('R', 4)
+#define PIN_IIC0_SCL PINPOS('R', 6)
+#define PIN_GPIO_ZQ PINPOS('R', 10)
+#define PIN_PWR_RSTN PINPOS('R', 11)
+#define PIN_PWR_SEQ2 PINPOS('R', 12)
+#define PIN_XTAL_XIN PINPOS('R', 13)
+
+#endif /* _DT_BINDINGS_PINCTRL_CV1812H_H */
diff --git a/include/dt-bindings/pinctrl/pinctrl-cv18xx.h b/include/dt-bindings/pinctrl/pinctrl-cv18xx.h
new file mode 100644
index 000000000000..bc92ad1067ec
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-cv18xx.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2023 Sophgo Ltd.
+ *
+ * Author: Inochi Amaoto <inochiama@outlook.com>
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_CV18XX_H
+#define _DT_BINDINGS_PINCTRL_CV18XX_H
+
+#define PIN_MUX_INVALD 0xff
+
+#define PINMUX2(pin, mux, mux2) \
+ (((pin) & 0xffff) | (((mux) & 0xff) << 16) | (((mux2) & 0xff) << 24))
+
+#define PINMUX(pin, mux) \
+ PINMUX2(pin, mux, PIN_MUX_INVALD)
+
+#endif /* _DT_BINDINGS_PINCTRL_CV18XX_H */
diff --git a/include/dt-bindings/pinctrl/pinctrl-sg2000.h b/include/dt-bindings/pinctrl/pinctrl-sg2000.h
new file mode 100644
index 000000000000..4871f9a7c6c1
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-sg2000.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * This file is generated from the vendor pinout definition.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_SG2000_H
+#define _DT_BINDINGS_PINCTRL_SG2000_H
+
+#include <dt-bindings/pinctrl/pinctrl-cv18xx.h>
+
+#define PINPOS(row, col) \
+ ((((row) - 'A' + 1) << 8) + ((col) - 1))
+
+#define PIN_MIPI_TXM4 PINPOS('A', 2)
+#define PIN_MIPIRX0N PINPOS('A', 4)
+#define PIN_MIPIRX3P PINPOS('A', 6)
+#define PIN_MIPIRX4P PINPOS('A', 7)
+#define PIN_VIVO_D2 PINPOS('A', 9)
+#define PIN_VIVO_D3 PINPOS('A', 10)
+#define PIN_VIVO_D10 PINPOS('A', 12)
+#define PIN_USB_VBUS_DET PINPOS('A', 13)
+#define PIN_MIPI_TXP3 PINPOS('B', 1)
+#define PIN_MIPI_TXM3 PINPOS('B', 2)
+#define PIN_MIPI_TXP4 PINPOS('B', 3)
+#define PIN_MIPIRX0P PINPOS('B', 4)
+#define PIN_MIPIRX1N PINPOS('B', 5)
+#define PIN_MIPIRX2N PINPOS('B', 6)
+#define PIN_MIPIRX4N PINPOS('B', 7)
+#define PIN_MIPIRX5N PINPOS('B', 8)
+#define PIN_VIVO_D1 PINPOS('B', 9)
+#define PIN_VIVO_D5 PINPOS('B', 10)
+#define PIN_VIVO_D7 PINPOS('B', 11)
+#define PIN_VIVO_D9 PINPOS('B', 12)
+#define PIN_USB_ID PINPOS('B', 13)
+#define PIN_ETH_RXM PINPOS('B', 15)
+#define PIN_MIPI_TXP2 PINPOS('C', 1)
+#define PIN_MIPI_TXM2 PINPOS('C', 2)
+#define PIN_CAM_PD0 PINPOS('C', 3)
+#define PIN_CAM_MCLK0 PINPOS('C', 4)
+#define PIN_MIPIRX1P PINPOS('C', 5)
+#define PIN_MIPIRX2P PINPOS('C', 6)
+#define PIN_MIPIRX3N PINPOS('C', 7)
+#define PIN_MIPIRX5P PINPOS('C', 8)
+#define PIN_VIVO_CLK PINPOS('C', 9)
+#define PIN_VIVO_D6 PINPOS('C', 10)
+#define PIN_VIVO_D8 PINPOS('C', 11)
+#define PIN_USB_VBUS_EN PINPOS('C', 12)
+#define PIN_ETH_RXP PINPOS('C', 14)
+#define PIN_GPIO_RTX PINPOS('C', 15)
+#define PIN_MIPI_TXP1 PINPOS('D', 1)
+#define PIN_MIPI_TXM1 PINPOS('D', 2)
+#define PIN_CAM_MCLK1 PINPOS('D', 3)
+#define PIN_IIC3_SCL PINPOS('D', 4)
+#define PIN_VIVO_D4 PINPOS('D', 10)
+#define PIN_ETH_TXM PINPOS('D', 14)
+#define PIN_ETH_TXP PINPOS('D', 15)
+#define PIN_MIPI_TXP0 PINPOS('E', 1)
+#define PIN_MIPI_TXM0 PINPOS('E', 2)
+#define PIN_CAM_PD1 PINPOS('E', 4)
+#define PIN_CAM_RST0 PINPOS('E', 5)
+#define PIN_VIVO_D0 PINPOS('E', 10)
+#define PIN_ADC1 PINPOS('E', 13)
+#define PIN_ADC2 PINPOS('E', 14)
+#define PIN_ADC3 PINPOS('E', 15)
+#define PIN_AUD_AOUTL PINPOS('F', 2)
+#define PIN_IIC3_SDA PINPOS('F', 4)
+#define PIN_SD1_D2 PINPOS('F', 14)
+#define PIN_AUD_AOUTR PINPOS('G', 2)
+#define PIN_SD1_D3 PINPOS('G', 13)
+#define PIN_SD1_CLK PINPOS('G', 14)
+#define PIN_SD1_CMD PINPOS('G', 15)
+#define PIN_AUD_AINL_MIC PINPOS('H', 1)
+#define PIN_RSTN PINPOS('H', 12)
+#define PIN_PWM0_BUCK PINPOS('H', 13)
+#define PIN_SD1_D1 PINPOS('H', 14)
+#define PIN_SD1_D0 PINPOS('H', 15)
+#define PIN_AUD_AINR_MIC PINPOS('J', 1)
+#define PIN_IIC2_SCL PINPOS('J', 13)
+#define PIN_IIC2_SDA PINPOS('J', 14)
+#define PIN_SD0_CD PINPOS('K', 2)
+#define PIN_SD0_D1 PINPOS('K', 3)
+#define PIN_UART2_RX PINPOS('K', 13)
+#define PIN_UART2_CTS PINPOS('K', 14)
+#define PIN_UART2_TX PINPOS('K', 15)
+#define PIN_SD0_CLK PINPOS('L', 1)
+#define PIN_SD0_D0 PINPOS('L', 2)
+#define PIN_SD0_CMD PINPOS('L', 3)
+#define PIN_CLK32K PINPOS('L', 14)
+#define PIN_UART2_RTS PINPOS('L', 15)
+#define PIN_SD0_D3 PINPOS('M', 1)
+#define PIN_SD0_D2 PINPOS('M', 2)
+#define PIN_UART0_RX PINPOS('M', 4)
+#define PIN_UART0_TX PINPOS('M', 5)
+#define PIN_JTAG_CPU_TRST PINPOS('M', 6)
+#define PIN_PWR_ON PINPOS('M', 11)
+#define PIN_PWR_GPIO2 PINPOS('M', 12)
+#define PIN_PWR_GPIO0 PINPOS('M', 13)
+#define PIN_CLK25M PINPOS('M', 14)
+#define PIN_SD0_PWR_EN PINPOS('N', 1)
+#define PIN_SPK_EN PINPOS('N', 3)
+#define PIN_JTAG_CPU_TCK PINPOS('N', 4)
+#define PIN_JTAG_CPU_TMS PINPOS('N', 6)
+#define PIN_PWR_WAKEUP1 PINPOS('N', 11)
+#define PIN_PWR_WAKEUP0 PINPOS('N', 12)
+#define PIN_PWR_GPIO1 PINPOS('N', 13)
+#define PIN_EMMC_DAT3 PINPOS('P', 1)
+#define PIN_EMMC_DAT0 PINPOS('P', 2)
+#define PIN_EMMC_DAT2 PINPOS('P', 3)
+#define PIN_EMMC_RSTN PINPOS('P', 4)
+#define PIN_AUX0 PINPOS('P', 5)
+#define PIN_IIC0_SDA PINPOS('P', 6)
+#define PIN_PWR_SEQ3 PINPOS('P', 10)
+#define PIN_PWR_VBAT_DET PINPOS('P', 11)
+#define PIN_PWR_SEQ1 PINPOS('P', 12)
+#define PIN_PWR_BUTTON1 PINPOS('P', 13)
+#define PIN_EMMC_DAT1 PINPOS('R', 2)
+#define PIN_EMMC_CMD PINPOS('R', 3)
+#define PIN_EMMC_CLK PINPOS('R', 4)
+#define PIN_IIC0_SCL PINPOS('R', 6)
+#define PIN_GPIO_ZQ PINPOS('R', 10)
+#define PIN_PWR_RSTN PINPOS('R', 11)
+#define PIN_PWR_SEQ2 PINPOS('R', 12)
+#define PIN_XTAL_XIN PINPOS('R', 13)
+
+#endif /* _DT_BINDINGS_PINCTRL_SG2000_H */
diff --git a/include/dt-bindings/pinctrl/pinctrl-sg2002.h b/include/dt-bindings/pinctrl/pinctrl-sg2002.h
new file mode 100644
index 000000000000..3c36cfa0a550
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-sg2002.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * This file is generated from the vendor pinout definition.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_SG2002_H
+#define _DT_BINDINGS_PINCTRL_SG2002_H
+
+#include <dt-bindings/pinctrl/pinctrl-cv18xx.h>
+
+#define PIN_AUD_AINL_MIC 2
+#define PIN_AUD_AOUTR 4
+#define PIN_SD0_CLK 6
+#define PIN_SD0_CMD 7
+#define PIN_SD0_D0 8
+#define PIN_SD0_D1 10
+#define PIN_SD0_D2 11
+#define PIN_SD0_D3 12
+#define PIN_SD0_CD 14
+#define PIN_SD0_PWR_EN 15
+#define PIN_SPK_EN 17
+#define PIN_UART0_TX 18
+#define PIN_UART0_RX 19
+#define PIN_EMMC_DAT2 20
+#define PIN_EMMC_CLK 21
+#define PIN_EMMC_DAT0 22
+#define PIN_EMMC_DAT3 23
+#define PIN_EMMC_CMD 24
+#define PIN_EMMC_DAT1 25
+#define PIN_JTAG_CPU_TMS 26
+#define PIN_JTAG_CPU_TCK 27
+#define PIN_IIC0_SCL 28
+#define PIN_IIC0_SDA 29
+#define PIN_AUX0 30
+#define PIN_GPIO_ZQ 35
+#define PIN_PWR_VBAT_DET 38
+#define PIN_PWR_RSTN 39
+#define PIN_PWR_SEQ1 40
+#define PIN_PWR_SEQ2 41
+#define PIN_PWR_WAKEUP0 43
+#define PIN_PWR_BUTTON1 44
+#define PIN_XTAL_XIN 45
+#define PIN_PWR_GPIO0 47
+#define PIN_PWR_GPIO1 48
+#define PIN_PWR_GPIO2 49
+#define PIN_SD1_D3 51
+#define PIN_SD1_D2 52
+#define PIN_SD1_D1 53
+#define PIN_SD1_D0 54
+#define PIN_SD1_CMD 55
+#define PIN_SD1_CLK 56
+#define PIN_PWM0_BUCK 58
+#define PIN_ADC1 59
+#define PIN_USB_VBUS_DET 60
+#define PIN_ETH_TXP 62
+#define PIN_ETH_TXM 63
+#define PIN_ETH_RXP 64
+#define PIN_ETH_RXM 65
+#define PIN_GPIO_RTX 67
+#define PIN_MIPIRX4N 72
+#define PIN_MIPIRX4P 73
+#define PIN_MIPIRX3N 74
+#define PIN_MIPIRX3P 75
+#define PIN_MIPIRX2N 76
+#define PIN_MIPIRX2P 77
+#define PIN_MIPIRX1N 78
+#define PIN_MIPIRX1P 79
+#define PIN_MIPIRX0N 80
+#define PIN_MIPIRX0P 81
+#define PIN_MIPI_TXM2 83
+#define PIN_MIPI_TXP2 84
+#define PIN_MIPI_TXM1 85
+#define PIN_MIPI_TXP1 86
+#define PIN_MIPI_TXM0 87
+#define PIN_MIPI_TXP0 88
+
+#endif /* _DT_BINDINGS_PINCTRL_SG2002_H */
diff --git a/include/dt-bindings/power/rockchip,rk3576-power.h b/include/dt-bindings/power/rockchip,rk3576-power.h
new file mode 100644
index 000000000000..324a056aa851
--- /dev/null
+++ b/include/dt-bindings/power/rockchip,rk3576-power.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+#ifndef __DT_BINDINGS_POWER_RK3576_POWER_H__
+#define __DT_BINDINGS_POWER_RK3576_POWER_H__
+
+/* VD_NPU */
+#define RK3576_PD_NPU 0
+#define RK3576_PD_NPUTOP 1
+#define RK3576_PD_NPU0 2
+#define RK3576_PD_NPU1 3
+
+/* VD_GPU */
+#define RK3576_PD_GPU 4
+
+/* VD_LOGIC */
+#define RK3576_PD_NVM 5
+#define RK3576_PD_SDGMAC 6
+#define RK3576_PD_USB 7
+#define RK3576_PD_PHP 8
+#define RK3576_PD_SUBPHP 9
+#define RK3576_PD_AUDIO 10
+#define RK3576_PD_VEPU0 11
+#define RK3576_PD_VEPU1 12
+#define RK3576_PD_VPU 13
+#define RK3576_PD_VDEC 14
+#define RK3576_PD_VI 15
+#define RK3576_PD_VO0 16
+#define RK3576_PD_VO1 17
+#define RK3576_PD_VOP 18
+
+#endif
diff --git a/include/dt-bindings/reset/rockchip,rk3576-cru.h b/include/dt-bindings/reset/rockchip,rk3576-cru.h
new file mode 100644
index 000000000000..ae856906f3a3
--- /dev/null
+++ b/include/dt-bindings/reset/rockchip,rk3576-cru.h
@@ -0,0 +1,564 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2023 Rockchip Electronics Co. Ltd.
+ * Copyright (c) 2024 Collabora Ltd.
+ *
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ * Author: Detlev Casanova <detlev.casanova@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_ROCKCHIP_RK3576_H
+#define _DT_BINDINGS_RESET_ROCKCHIP_RK3576_H
+
+#define SRST_A_TOP_BIU 0
+#define SRST_P_TOP_BIU 1
+#define SRST_A_TOP_MID_BIU 2
+#define SRST_A_SECURE_HIGH_BIU 3
+#define SRST_H_TOP_BIU 4
+
+#define SRST_H_VO0VOP_CHANNEL_BIU 5
+#define SRST_A_VO0VOP_CHANNEL_BIU 6
+
+#define SRST_BISRINTF 7
+
+#define SRST_H_AUDIO_BIU 8
+#define SRST_H_ASRC_2CH_0 9
+#define SRST_H_ASRC_2CH_1 10
+#define SRST_H_ASRC_4CH_0 11
+#define SRST_H_ASRC_4CH_1 12
+#define SRST_ASRC_2CH_0 13
+#define SRST_ASRC_2CH_1 14
+#define SRST_ASRC_4CH_0 15
+#define SRST_ASRC_4CH_1 16
+#define SRST_M_SAI0_8CH 17
+#define SRST_H_SAI0_8CH 18
+#define SRST_H_SPDIF_RX0 19
+#define SRST_M_SPDIF_RX0 20
+
+#define SRST_H_SPDIF_RX1 21
+#define SRST_M_SPDIF_RX1 22
+#define SRST_M_SAI1_8CH 23
+#define SRST_H_SAI1_8CH 24
+#define SRST_M_SAI2_2CH 25
+#define SRST_H_SAI2_2CH 26
+#define SRST_M_SAI3_2CH 27
+#define SRST_H_SAI3_2CH 28
+
+#define SRST_M_SAI4_2CH 29
+#define SRST_H_SAI4_2CH 30
+#define SRST_H_ACDCDIG_DSM 31
+#define SRST_M_ACDCDIG_DSM 32
+#define SRST_PDM1 33
+#define SRST_H_PDM1 34
+#define SRST_M_PDM1 35
+#define SRST_H_SPDIF_TX0 36
+#define SRST_M_SPDIF_TX0 37
+#define SRST_H_SPDIF_TX1 38
+#define SRST_M_SPDIF_TX1 39
+
+#define SRST_A_BUS_BIU 40
+#define SRST_P_BUS_BIU 41
+#define SRST_P_CRU 42
+#define SRST_H_CAN0 43
+#define SRST_CAN0 44
+#define SRST_H_CAN1 45
+#define SRST_CAN1 46
+#define SRST_P_INTMUX2BUS 47
+#define SRST_P_VCCIO_IOC 48
+#define SRST_H_BUS_BIU 49
+#define SRST_KEY_SHIFT 50
+
+#define SRST_P_I2C1 51
+#define SRST_P_I2C2 52
+#define SRST_P_I2C3 53
+#define SRST_P_I2C4 54
+#define SRST_P_I2C5 55
+#define SRST_P_I2C6 56
+#define SRST_P_I2C7 57
+#define SRST_P_I2C8 58
+#define SRST_P_I2C9 59
+#define SRST_P_WDT_BUSMCU 60
+#define SRST_T_WDT_BUSMCU 61
+#define SRST_A_GIC 62
+#define SRST_I2C1 63
+#define SRST_I2C2 64
+#define SRST_I2C3 65
+#define SRST_I2C4 66
+
+#define SRST_I2C5 67
+#define SRST_I2C6 68
+#define SRST_I2C7 69
+#define SRST_I2C8 70
+#define SRST_I2C9 71
+#define SRST_P_SARADC 72
+#define SRST_SARADC 73
+#define SRST_P_TSADC 74
+#define SRST_TSADC 75
+#define SRST_P_UART0 76
+#define SRST_P_UART2 77
+#define SRST_P_UART3 78
+#define SRST_P_UART4 79
+#define SRST_P_UART5 80
+#define SRST_P_UART6 81
+
+#define SRST_P_UART7 82
+#define SRST_P_UART8 83
+#define SRST_P_UART9 84
+#define SRST_P_UART10 85
+#define SRST_P_UART11 86
+#define SRST_S_UART0 87
+#define SRST_S_UART2 88
+#define SRST_S_UART3 89
+#define SRST_S_UART4 90
+#define SRST_S_UART5 91
+
+#define SRST_S_UART6 92
+#define SRST_S_UART7 93
+#define SRST_S_UART8 94
+#define SRST_S_UART9 95
+#define SRST_S_UART10 96
+#define SRST_S_UART11 97
+#define SRST_P_SPI0 98
+#define SRST_P_SPI1 99
+#define SRST_P_SPI2 100
+
+#define SRST_P_SPI3 101
+#define SRST_P_SPI4 102
+#define SRST_SPI0 103
+#define SRST_SPI1 104
+#define SRST_SPI2 105
+#define SRST_SPI3 106
+#define SRST_SPI4 107
+#define SRST_P_WDT0 108
+#define SRST_T_WDT0 109
+#define SRST_P_SYS_GRF 110
+#define SRST_P_PWM1 111
+#define SRST_PWM1 112
+
+#define SRST_P_BUSTIMER0 113
+#define SRST_P_BUSTIMER1 114
+#define SRST_TIMER0 115
+#define SRST_TIMER1 116
+#define SRST_TIMER2 117
+#define SRST_TIMER3 118
+#define SRST_TIMER4 119
+#define SRST_TIMER5 120
+#define SRST_P_BUSIOC 121
+#define SRST_P_MAILBOX0 122
+#define SRST_P_GPIO1 123
+
+#define SRST_GPIO1 124
+#define SRST_P_GPIO2 125
+#define SRST_GPIO2 126
+#define SRST_P_GPIO3 127
+#define SRST_GPIO3 128
+#define SRST_P_GPIO4 129
+#define SRST_GPIO4 130
+#define SRST_A_DECOM 131
+#define SRST_P_DECOM 132
+#define SRST_D_DECOM 133
+#define SRST_TIMER6 134
+#define SRST_TIMER7 135
+#define SRST_TIMER8 136
+#define SRST_TIMER9 137
+#define SRST_TIMER10 138
+
+#define SRST_TIMER11 139
+#define SRST_A_DMAC0 140
+#define SRST_A_DMAC1 141
+#define SRST_A_DMAC2 142
+#define SRST_A_SPINLOCK 143
+#define SRST_REF_PVTPLL_BUS 144
+#define SRST_H_I3C0 145
+#define SRST_H_I3C1 146
+#define SRST_H_BUS_CM0_BIU 147
+#define SRST_F_BUS_CM0_CORE 148
+#define SRST_T_BUS_CM0_JTAG 149
+
+#define SRST_P_INTMUX2PMU 150
+#define SRST_P_INTMUX2DDR 151
+#define SRST_P_PVTPLL_BUS 152
+#define SRST_P_PWM2 153
+#define SRST_PWM2 154
+#define SRST_FREQ_PWM1 155
+#define SRST_COUNTER_PWM1 156
+#define SRST_I3C0 157
+#define SRST_I3C1 158
+
+#define SRST_P_DDR_MON_CH0 159
+#define SRST_P_DDR_BIU 160
+#define SRST_P_DDR_UPCTL_CH0 161
+#define SRST_TM_DDR_MON_CH0 162
+#define SRST_A_DDR_BIU 163
+#define SRST_DFI_CH0 164
+#define SRST_DDR_MON_CH0 165
+#define SRST_P_DDR_HWLP_CH0 166
+#define SRST_P_DDR_MON_CH1 167
+#define SRST_P_DDR_HWLP_CH1 168
+
+#define SRST_P_DDR_UPCTL_CH1 169
+#define SRST_TM_DDR_MON_CH1 170
+#define SRST_DFI_CH1 171
+#define SRST_A_DDR01_MSCH0 172
+#define SRST_A_DDR01_MSCH1 173
+#define SRST_DDR_MON_CH1 174
+#define SRST_DDR_SCRAMBLE_CH0 175
+#define SRST_DDR_SCRAMBLE_CH1 176
+#define SRST_P_AHB2APB 177
+#define SRST_H_AHB2APB 178
+#define SRST_H_DDR_BIU 179
+#define SRST_F_DDR_CM0_CORE 180
+
+#define SRST_P_DDR01_MSCH0 181
+#define SRST_P_DDR01_MSCH1 182
+#define SRST_DDR_TIMER0 183
+#define SRST_DDR_TIMER1 184
+#define SRST_T_WDT_DDR 185
+#define SRST_P_WDT 186
+#define SRST_P_TIMER 187
+#define SRST_T_DDR_CM0_JTAG 188
+#define SRST_P_DDR_GRF 189
+
+#define SRST_DDR_UPCTL_CH0 190
+#define SRST_A_DDR_UPCTL_0_CH0 191
+#define SRST_A_DDR_UPCTL_1_CH0 192
+#define SRST_A_DDR_UPCTL_2_CH0 193
+#define SRST_A_DDR_UPCTL_3_CH0 194
+#define SRST_A_DDR_UPCTL_4_CH0 195
+
+#define SRST_DDR_UPCTL_CH1 196
+#define SRST_A_DDR_UPCTL_0_CH1 197
+#define SRST_A_DDR_UPCTL_1_CH1 198
+#define SRST_A_DDR_UPCTL_2_CH1 199
+#define SRST_A_DDR_UPCTL_3_CH1 200
+#define SRST_A_DDR_UPCTL_4_CH1 201
+
+#define SRST_REF_PVTPLL_DDR 202
+#define SRST_P_PVTPLL_DDR 203
+
+#define SRST_A_RKNN0 204
+#define SRST_A_RKNN0_BIU 205
+#define SRST_L_RKNN0_BIU 206
+
+#define SRST_A_RKNN1 207
+#define SRST_A_RKNN1_BIU 208
+#define SRST_L_RKNN1_BIU 209
+
+#define SRST_NPU_DAP 210
+#define SRST_L_NPUSUBSYS_BIU 211
+#define SRST_P_NPUTOP_BIU 212
+#define SRST_P_NPU_TIMER 213
+#define SRST_NPUTIMER0 214
+#define SRST_NPUTIMER1 215
+#define SRST_P_NPU_WDT 216
+#define SRST_T_NPU_WDT 217
+
+#define SRST_A_RKNN_CBUF 218
+#define SRST_A_RVCORE0 219
+#define SRST_P_NPU_GRF 220
+#define SRST_P_PVTPLL_NPU 221
+#define SRST_NPU_PVTPLL 222
+#define SRST_H_NPU_CM0_BIU 223
+#define SRST_F_NPU_CM0_CORE 224
+#define SRST_T_NPU_CM0_JTAG 225
+#define SRST_A_RKNNTOP_BIU 226
+#define SRST_H_RKNN_CBUF 227
+#define SRST_H_RKNNTOP_BIU 228
+
+#define SRST_H_NVM_BIU 229
+#define SRST_A_NVM_BIU 230
+#define SRST_S_FSPI 231
+#define SRST_H_FSPI 232
+#define SRST_C_EMMC 233
+#define SRST_H_EMMC 234
+#define SRST_A_EMMC 235
+#define SRST_B_EMMC 236
+#define SRST_T_EMMC 237
+
+#define SRST_P_GRF 238
+#define SRST_P_PHP_BIU 239
+#define SRST_A_PHP_BIU 240
+#define SRST_P_PCIE0 241
+#define SRST_PCIE0_POWER_UP 242
+
+#define SRST_A_USB3OTG1 243
+#define SRST_A_MMU0 244
+#define SRST_A_SLV_MMU0 245
+#define SRST_A_MMU1 246
+
+#define SRST_A_SLV_MMU1 247
+#define SRST_P_PCIE1 248
+#define SRST_PCIE1_POWER_UP 249
+
+#define SRST_RXOOB0 250
+#define SRST_RXOOB1 251
+#define SRST_PMALIVE0 252
+#define SRST_PMALIVE1 253
+#define SRST_A_SATA0 254
+#define SRST_A_SATA1 255
+#define SRST_ASIC1 256
+#define SRST_ASIC0 257
+
+#define SRST_P_CSIDPHY1 258
+#define SRST_SCAN_CSIDPHY1 259
+
+#define SRST_P_SDGMAC_GRF 260
+#define SRST_P_SDGMAC_BIU 261
+#define SRST_A_SDGMAC_BIU 262
+#define SRST_H_SDGMAC_BIU 263
+#define SRST_A_GMAC0 264
+#define SRST_A_GMAC1 265
+#define SRST_P_GMAC0 266
+#define SRST_P_GMAC1 267
+#define SRST_H_SDIO 268
+
+#define SRST_H_SDMMC0 269
+#define SRST_S_FSPI1 270
+#define SRST_H_FSPI1 271
+#define SRST_A_DSMC_BIU 272
+#define SRST_A_DSMC 273
+#define SRST_P_DSMC 274
+#define SRST_H_HSGPIO 275
+#define SRST_HSGPIO 276
+#define SRST_A_HSGPIO 277
+
+#define SRST_H_RKVDEC 278
+#define SRST_H_RKVDEC_BIU 279
+#define SRST_A_RKVDEC_BIU 280
+#define SRST_RKVDEC_HEVC_CA 281
+#define SRST_RKVDEC_CORE 282
+
+#define SRST_A_USB_BIU 283
+#define SRST_P_USBUFS_BIU 284
+#define SRST_A_USB3OTG0 285
+#define SRST_A_UFS_BIU 286
+#define SRST_A_MMU2 287
+#define SRST_A_SLV_MMU2 288
+#define SRST_A_UFS_SYS 289
+
+#define SRST_A_UFS 290
+#define SRST_P_USBUFS_GRF 291
+#define SRST_P_UFS_GRF 292
+
+#define SRST_H_VPU_BIU 293
+#define SRST_A_JPEG_BIU 294
+#define SRST_A_RGA_BIU 295
+#define SRST_A_VDPP_BIU 296
+#define SRST_A_EBC_BIU 297
+#define SRST_H_RGA2E_0 298
+#define SRST_A_RGA2E_0 299
+#define SRST_CORE_RGA2E_0 300
+
+#define SRST_A_JPEG 301
+#define SRST_H_JPEG 302
+#define SRST_H_VDPP 303
+#define SRST_A_VDPP 304
+#define SRST_CORE_VDPP 305
+#define SRST_H_RGA2E_1 306
+#define SRST_A_RGA2E_1 307
+#define SRST_CORE_RGA2E_1 308
+#define SRST_H_EBC 309
+#define SRST_A_EBC 310
+#define SRST_D_EBC 311
+
+#define SRST_H_VEPU0_BIU 312
+#define SRST_A_VEPU0_BIU 313
+#define SRST_H_VEPU0 314
+#define SRST_A_VEPU0 315
+#define SRST_VEPU0_CORE 316
+
+#define SRST_A_VI_BIU 317
+#define SRST_H_VI_BIU 318
+#define SRST_P_VI_BIU 319
+#define SRST_D_VICAP 320
+#define SRST_A_VICAP 321
+#define SRST_H_VICAP 322
+#define SRST_ISP0 323
+#define SRST_ISP0_VICAP 324
+
+#define SRST_CORE_VPSS 325
+#define SRST_P_CSI_HOST_0 326
+#define SRST_P_CSI_HOST_1 327
+#define SRST_P_CSI_HOST_2 328
+#define SRST_P_CSI_HOST_3 329
+#define SRST_P_CSI_HOST_4 330
+
+#define SRST_CIFIN 331
+#define SRST_VICAP_I0CLK 332
+#define SRST_VICAP_I1CLK 333
+#define SRST_VICAP_I2CLK 334
+#define SRST_VICAP_I3CLK 335
+#define SRST_VICAP_I4CLK 336
+
+#define SRST_A_VOP_BIU 337
+#define SRST_A_VOP2_BIU 338
+#define SRST_H_VOP_BIU 339
+#define SRST_P_VOP_BIU 340
+#define SRST_H_VOP 341
+#define SRST_A_VOP 342
+#define SRST_D_VP0 343
+
+#define SRST_D_VP1 344
+#define SRST_D_VP2 345
+#define SRST_P_VOP2_BIU 346
+#define SRST_P_VOPGRF 347
+
+#define SRST_H_VO0_BIU 348
+#define SRST_P_VO0_BIU 349
+#define SRST_A_HDCP0_BIU 350
+#define SRST_P_VO0_GRF 351
+#define SRST_A_HDCP0 352
+#define SRST_H_HDCP0 353
+#define SRST_HDCP0 354
+
+#define SRST_P_DSIHOST0 355
+#define SRST_DSIHOST0 356
+#define SRST_P_HDMITX0 357
+#define SRST_HDMITX0_REF 358
+#define SRST_P_EDP0 359
+#define SRST_EDP0_24M 360
+
+#define SRST_M_SAI5_8CH 361
+#define SRST_H_SAI5_8CH 362
+#define SRST_M_SAI6_8CH 363
+#define SRST_H_SAI6_8CH 364
+#define SRST_H_SPDIF_TX2 365
+#define SRST_M_SPDIF_TX2 366
+#define SRST_H_SPDIF_RX2 367
+#define SRST_M_SPDIF_RX2 368
+
+#define SRST_H_SAI8_8CH 369
+#define SRST_M_SAI8_8CH 370
+
+#define SRST_H_VO1_BIU 371
+#define SRST_P_VO1_BIU 372
+#define SRST_M_SAI7_8CH 373
+#define SRST_H_SAI7_8CH 374
+#define SRST_H_SPDIF_TX3 375
+#define SRST_H_SPDIF_TX4 376
+#define SRST_H_SPDIF_TX5 377
+#define SRST_M_SPDIF_TX3 378
+
+#define SRST_DP0 379
+#define SRST_P_VO1_GRF 380
+#define SRST_A_HDCP1_BIU 381
+#define SRST_A_HDCP1 382
+#define SRST_H_HDCP1 383
+#define SRST_HDCP1 384
+#define SRST_H_SAI9_8CH 385
+#define SRST_M_SAI9_8CH 386
+#define SRST_M_SPDIF_TX4 387
+#define SRST_M_SPDIF_TX5 388
+
+#define SRST_GPU 389
+#define SRST_A_S_GPU_BIU 390
+#define SRST_A_M0_GPU_BIU 391
+#define SRST_P_GPU_BIU 392
+#define SRST_P_GPU_GRF 393
+#define SRST_GPU_PVTPLL 394
+#define SRST_P_PVTPLL_GPU 395
+
+#define SRST_A_CENTER_BIU 396
+#define SRST_A_DMA2DDR 397
+#define SRST_A_DDR_SHAREMEM 398
+#define SRST_A_DDR_SHAREMEM_BIU 399
+#define SRST_H_CENTER_BIU 400
+#define SRST_P_CENTER_GRF 401
+#define SRST_P_DMA2DDR 402
+#define SRST_P_SHAREMEM 403
+#define SRST_P_CENTER_BIU 404
+
+#define SRST_LINKSYM_HDMITXPHY0 405
+
+#define SRST_DP0_PIXELCLK 406
+#define SRST_PHY_DP0_TX 407
+#define SRST_DP1_PIXELCLK 408
+#define SRST_DP2_PIXELCLK 409
+
+#define SRST_H_VEPU1_BIU 410
+#define SRST_A_VEPU1_BIU 411
+#define SRST_H_VEPU1 412
+#define SRST_A_VEPU1 413
+#define SRST_VEPU1_CORE 414
+
+#define SRST_P_PHPPHY_CRU 415
+#define SRST_P_APB2ASB_SLV_CHIP_TOP 416
+#define SRST_P_PCIE2_COMBOPHY0 417
+#define SRST_P_PCIE2_COMBOPHY0_GRF 418
+#define SRST_P_PCIE2_COMBOPHY1 419
+#define SRST_P_PCIE2_COMBOPHY1_GRF 420
+
+#define SRST_PCIE0_PIPE_PHY 421
+#define SRST_PCIE1_PIPE_PHY 422
+
+#define SRST_H_CRYPTO_NS 423
+#define SRST_H_TRNG_NS 424
+#define SRST_P_OTPC_NS 425
+#define SRST_OTPC_NS 426
+
+#define SRST_P_HDPTX_GRF 427
+#define SRST_P_HDPTX_APB 428
+#define SRST_P_MIPI_DCPHY 429
+#define SRST_P_DCPHY_GRF 430
+#define SRST_P_BOT0_APB2ASB 431
+#define SRST_P_BOT1_APB2ASB 432
+#define SRST_USB2DEBUG 433
+#define SRST_P_CSIPHY_GRF 434
+#define SRST_P_CSIPHY 435
+#define SRST_P_USBPHY_GRF_0 436
+#define SRST_P_USBPHY_GRF_1 437
+#define SRST_P_USBDP_GRF 438
+#define SRST_P_USBDPPHY 439
+#define SRST_USBDP_COMBO_PHY_INIT 440
+
+#define SRST_USBDP_COMBO_PHY_CMN 441
+#define SRST_USBDP_COMBO_PHY_LANE 442
+#define SRST_USBDP_COMBO_PHY_PCS 443
+#define SRST_M_MIPI_DCPHY 444
+#define SRST_S_MIPI_DCPHY 445
+#define SRST_SCAN_CSIPHY 446
+#define SRST_P_VCCIO6_IOC 447
+#define SRST_OTGPHY_0 448
+#define SRST_OTGPHY_1 449
+#define SRST_HDPTX_INIT 450
+#define SRST_HDPTX_CMN 451
+#define SRST_HDPTX_LANE 452
+#define SRST_HDMITXHDP 453
+
+#define SRST_MPHY_INIT 454
+#define SRST_P_MPHY_GRF 455
+#define SRST_P_VCCIO7_IOC 456
+
+#define SRST_H_PMU1_BIU 457
+#define SRST_P_PMU1_NIU 458
+#define SRST_H_PMU_CM0_BIU 459
+#define SRST_PMU_CM0_CORE 460
+#define SRST_PMU_CM0_JTAG 461
+
+#define SRST_P_CRU_PMU1 462
+#define SRST_P_PMU1_GRF 463
+#define SRST_P_PMU1_IOC 464
+#define SRST_P_PMU1WDT 465
+#define SRST_T_PMU1WDT 466
+#define SRST_P_PMUTIMER 467
+#define SRST_PMUTIMER0 468
+#define SRST_PMUTIMER1 469
+#define SRST_P_PMU1PWM 470
+#define SRST_PMU1PWM 471
+
+#define SRST_P_I2C0 472
+#define SRST_I2C0 473
+#define SRST_S_UART1 474
+#define SRST_P_UART1 475
+#define SRST_PDM0 476
+#define SRST_H_PDM0 477
+
+#define SRST_M_PDM0 478
+#define SRST_H_VAD 479
+
+#define SRST_P_PMU0GRF 480
+#define SRST_P_PMU0IOC 481
+#define SRST_P_GPIO0 482
+#define SRST_DB_GPIO0 483
+
+#endif
diff --git a/include/dt-bindings/soc/qe-fsl,tsa.h b/include/dt-bindings/soc/qe-fsl,tsa.h
new file mode 100644
index 000000000000..3cf3df9c0968
--- /dev/null
+++ b/include/dt-bindings/soc/qe-fsl,tsa.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef __DT_BINDINGS_SOC_FSL_QE_TSA_H
+#define __DT_BINDINGS_SOC_FSL_QE_TSA_H
+
+#define FSL_QE_TSA_NU 0
+#define FSL_QE_TSA_UCC1 1
+#define FSL_QE_TSA_UCC2 2
+#define FSL_QE_TSA_UCC3 3
+#define FSL_QE_TSA_UCC4 4
+#define FSL_QE_TSA_UCC5 5
+
+#endif
diff --git a/include/keys/dns_resolver-type.h b/include/keys/dns_resolver-type.h
index 218ca22fb056..1b89088a2837 100644
--- a/include/keys/dns_resolver-type.h
+++ b/include/keys/dns_resolver-type.h
@@ -12,8 +12,4 @@
extern struct key_type key_type_dns_resolver;
-extern int request_dns_resolver_key(const char *description,
- const char *callout_info,
- char **data);
-
#endif /* _KEYS_DNS_RESOLVER_TYPE_H */
diff --git a/include/kunit/clk.h b/include/kunit/clk.h
new file mode 100644
index 000000000000..73bc99cefe7b
--- /dev/null
+++ b/include/kunit/clk.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _CLK_KUNIT_H
+#define _CLK_KUNIT_H
+
+struct clk;
+struct clk_hw;
+struct device;
+struct device_node;
+struct kunit;
+
+struct clk *
+clk_get_kunit(struct kunit *test, struct device *dev, const char *con_id);
+struct clk *
+of_clk_get_kunit(struct kunit *test, struct device_node *np, int index);
+
+struct clk *
+clk_hw_get_clk_kunit(struct kunit *test, struct clk_hw *hw, const char *con_id);
+struct clk *
+clk_hw_get_clk_prepared_enabled_kunit(struct kunit *test, struct clk_hw *hw,
+ const char *con_id);
+
+int clk_prepare_enable_kunit(struct kunit *test, struct clk *clk);
+
+int clk_hw_register_kunit(struct kunit *test, struct device *dev, struct clk_hw *hw);
+int of_clk_hw_register_kunit(struct kunit *test, struct device_node *node,
+ struct clk_hw *hw);
+
+#endif
diff --git a/include/kunit/of.h b/include/kunit/of.h
new file mode 100644
index 000000000000..48d4e70c9666
--- /dev/null
+++ b/include/kunit/of.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _KUNIT_OF_H
+#define _KUNIT_OF_H
+
+#include <kunit/test.h>
+
+struct device_node;
+
+#ifdef CONFIG_OF
+
+void of_node_put_kunit(struct kunit *test, struct device_node *node);
+
+#else
+
+static inline
+void of_node_put_kunit(struct kunit *test, struct device_node *node)
+{
+ kunit_skip(test, "requires CONFIG_OF");
+}
+
+#endif /* !CONFIG_OF */
+
+#if defined(CONFIG_OF) && defined(CONFIG_OF_OVERLAY) && defined(CONFIG_OF_EARLY_FLATTREE)
+
+int of_overlay_fdt_apply_kunit(struct kunit *test, void *overlay_fdt,
+ u32 overlay_fdt_size, int *ovcs_id);
+#else
+
+static inline int
+of_overlay_fdt_apply_kunit(struct kunit *test, void *overlay_fdt,
+ u32 overlay_fdt_size, int *ovcs_id)
+{
+ kunit_skip(test, "requires CONFIG_OF and CONFIG_OF_OVERLAY and CONFIG_OF_EARLY_FLATTREE for root node");
+ return -EINVAL;
+}
+
+#endif
+
+/**
+ * __of_overlay_apply_kunit() - Test managed of_overlay_fdt_apply() variant
+ * @test: test context
+ * @overlay_begin: start address of overlay to apply
+ * @overlay_end: end address of overlay to apply
+ *
+ * This is mostly internal API. See of_overlay_apply_kunit() for the wrapper
+ * that makes this easier to use.
+ *
+ * Similar to of_overlay_fdt_apply(), except the overlay is managed by the test
+ * case and is automatically removed with of_overlay_remove() after the test
+ * case concludes.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static inline int __of_overlay_apply_kunit(struct kunit *test,
+ u8 *overlay_begin,
+ const u8 *overlay_end)
+{
+ int unused;
+
+ return of_overlay_fdt_apply_kunit(test, overlay_begin,
+ overlay_end - overlay_begin,
+ &unused);
+}
+
+/**
+ * of_overlay_apply_kunit() - Test managed of_overlay_fdt_apply() for built-in overlays
+ * @test: test context
+ * @overlay_name: name of overlay to apply
+ *
+ * This macro is used to apply a device tree overlay built with the
+ * cmd_dt_S_dtbo rule in scripts/Makefile.lib that has been compiled into the
+ * kernel image or KUnit test module. The overlay is automatically removed when
+ * the test is finished.
+ *
+ * Unit tests that need device tree nodes should compile an overlay file with
+ * @overlay_name\.dtbo.o in their Makefile along with their unit test and then
+ * load the overlay during their test. The @overlay_name matches the filename
+ * of the overlay without the dtbo filename extension. If CONFIG_OF_OVERLAY is
+ * not enabled, the @test will be skipped.
+ *
+ * In the Makefile
+ *
+ * .. code-block:: none
+ *
+ * obj-$(CONFIG_OF_OVERLAY_KUNIT_TEST) += overlay_test.o kunit_overlay_test.dtbo.o
+ *
+ * In the test
+ *
+ * .. code-block:: c
+ *
+ * static void of_overlay_kunit_of_overlay_apply(struct kunit *test)
+ * {
+ * struct device_node *np;
+ *
+ * KUNIT_ASSERT_EQ(test, 0,
+ * of_overlay_apply_kunit(test, kunit_overlay_test));
+ *
+ * np = of_find_node_by_name(NULL, "test-kunit");
+ * KUNIT_EXPECT_NOT_ERR_OR_NULL(test, np);
+ * of_node_put(np);
+ * }
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+#define of_overlay_apply_kunit(test, overlay_name) \
+({ \
+ extern uint8_t __dtbo_##overlay_name##_begin[]; \
+ extern uint8_t __dtbo_##overlay_name##_end[]; \
+ \
+ __of_overlay_apply_kunit((test), \
+ __dtbo_##overlay_name##_begin, \
+ __dtbo_##overlay_name##_end); \
+})
+
+#endif
diff --git a/include/kunit/platform_device.h b/include/kunit/platform_device.h
new file mode 100644
index 000000000000..0fc0999d2420
--- /dev/null
+++ b/include/kunit/platform_device.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _KUNIT_PLATFORM_DRIVER_H
+#define _KUNIT_PLATFORM_DRIVER_H
+
+struct kunit;
+struct platform_device;
+struct platform_driver;
+
+struct platform_device *
+kunit_platform_device_alloc(struct kunit *test, const char *name, int id);
+int kunit_platform_device_add(struct kunit *test, struct platform_device *pdev);
+
+int kunit_platform_device_prepare_wait_for_probe(struct kunit *test,
+ struct platform_device *pdev,
+ struct completion *x);
+
+int kunit_platform_driver_register(struct kunit *test,
+ struct platform_driver *drv);
+
+#endif
diff --git a/include/kunit/visibility.h b/include/kunit/visibility.h
index 0dfe35feeec6..efff77b58dd6 100644
--- a/include/kunit/visibility.h
+++ b/include/kunit/visibility.h
@@ -22,6 +22,7 @@
* EXPORTED_FOR_KUNIT_TESTING namespace only if CONFIG_KUNIT is
* enabled. Must use MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING)
* in test file in order to use symbols.
+ * @symbol: the symbol identifier to export
*/
#define EXPORT_SYMBOL_IF_KUNIT(symbol) EXPORT_SYMBOL_NS(symbol, \
EXPORTED_FOR_KUNIT_TESTING)
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 35d4ca4f6122..e08aeec5d936 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -10,7 +10,7 @@
#include <linux/perf_event.h>
#include <linux/perf/arm_pmuv3.h>
-#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1)
+#define KVM_ARMV8_PMU_MAX_COUNTERS 32
#if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)
struct kvm_pmc {
@@ -19,14 +19,14 @@ struct kvm_pmc {
};
struct kvm_pmu_events {
- u32 events_host;
- u32 events_guest;
+ u64 events_host;
+ u64 events_guest;
};
struct kvm_pmu {
struct irq_work overflow_work;
struct kvm_pmu_events events;
- struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
+ struct kvm_pmc pmc[KVM_ARMV8_PMU_MAX_COUNTERS];
int irq_num;
bool created;
bool irq_level;
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 0687a442fec7..4d5ee84c468b 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -107,6 +107,7 @@ enum acpi_irq_model_id {
ACPI_IRQ_MODEL_PLATFORM,
ACPI_IRQ_MODEL_GIC,
ACPI_IRQ_MODEL_LPIC,
+ ACPI_IRQ_MODEL_RINTC,
ACPI_IRQ_MODEL_COUNT
};
@@ -362,6 +363,7 @@ void acpi_unregister_gsi (u32 gsi);
struct pci_dev;
+struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin);
int acpi_pci_irq_enable (struct pci_dev *dev);
void acpi_penalize_isa_irq(int irq, int active);
bool acpi_isa_irq_available(int irq);
@@ -386,7 +388,7 @@ extern bool acpi_is_pnp_device(struct acpi_device *);
#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
-typedef void (*wmi_notify_handler) (u32 value, void *context);
+typedef void (*wmi_notify_handler) (union acpi_object *data, void *context);
int wmi_instance_count(const char *guid);
@@ -401,7 +403,6 @@ extern acpi_status wmi_set_block(const char *guid, u8 instance,
extern acpi_status wmi_install_notify_handler(const char *guid,
wmi_notify_handler handler, void *data);
extern acpi_status wmi_remove_notify_handler(const char *guid);
-extern acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out);
extern bool wmi_has_guid(const char *guid);
extern char *wmi_get_acpi_device_uid(const char *guid);
@@ -1343,6 +1344,8 @@ struct acpi_probe_entry {
kernel_ulong_t driver_data;
};
+void arch_sort_irqchip_probe(struct acpi_probe_entry *ap_head, int nr);
+
#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, \
valid, data, fn) \
static const struct acpi_probe_entry __acpi_probe_##name \
@@ -1529,6 +1532,12 @@ void acpi_arm_init(void);
static inline void acpi_arm_init(void) { }
#endif
+#ifdef CONFIG_RISCV
+void acpi_riscv_init(void);
+#else
+static inline void acpi_riscv_init(void) { }
+#endif
+
#ifdef CONFIG_ACPI_PCC
void acpi_init_pcc(void);
#else
diff --git a/include/linux/acpi_pmtmr.h b/include/linux/acpi_pmtmr.h
index 50d88bf1498d..0ded9220d379 100644
--- a/include/linux/acpi_pmtmr.h
+++ b/include/linux/acpi_pmtmr.h
@@ -26,6 +26,19 @@ static inline u32 acpi_pm_read_early(void)
return acpi_pm_read_verified() & ACPI_PM_MASK;
}
+/**
+ * acpi_pmtmr_register_suspend_resume_callback() - Register a callback for suspend and resume events
+ *
+ * @cb: Callback triggered on suspend and resume
+ * @data: Data passed to the callback
+ */
+void acpi_pmtmr_register_suspend_resume_callback(void (*cb)(void *data, bool suspend), void *data);
+
+/**
+ * Remove registered callback for suspend and resume event
+ */
+void acpi_pmtmr_unregister_suspend_resume_callback(void);
+
#else
static inline u32 acpi_pm_read_early(void)
diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
index 8c61ccd161ba..1f0a9ff23a2c 100644
--- a/include/linux/alloc_tag.h
+++ b/include/linux/alloc_tag.h
@@ -70,7 +70,7 @@ static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct)
/*
* When percpu variables are required to be defined as weak, static percpu
* variables can't be used inside a function (see comments for DECLARE_PER_CPU_SECTION).
- * Instead we will accound all module allocations to a single counter.
+ * Instead we will account all module allocations to a single counter.
*/
DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
@@ -137,7 +137,16 @@ static inline void alloc_tag_sub_check(union codetag_ref *ref) {}
/* Caller should verify both ref and tag to be valid */
static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
+ alloc_tag_add_check(ref, tag);
+ if (!ref || !tag)
+ return;
+
ref->ct = &tag->ct;
+}
+
+static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
+{
+ __alloc_tag_ref_set(ref, tag);
/*
* We need to increment the call counter every time we have a new
* allocation or when we split a large allocation into smaller ones.
@@ -147,22 +156,9 @@ static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag
this_cpu_inc(tag->counters->calls);
}
-static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
-{
- alloc_tag_add_check(ref, tag);
- if (!ref || !tag)
- return;
-
- __alloc_tag_ref_set(ref, tag);
-}
-
static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes)
{
- alloc_tag_add_check(ref, tag);
- if (!ref || !tag)
- return;
-
- __alloc_tag_ref_set(ref, tag);
+ alloc_tag_ref_set(ref, tag);
this_cpu_add(tag->counters->bytes, bytes);
}
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 958a55bcc708..dda2f3ea89cb 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -105,7 +105,7 @@ enum amba_vendor {
AMBA_VENDOR_LSI = 0xb6,
};
-extern struct bus_type amba_bustype;
+extern const struct bus_type amba_bustype;
#define to_amba_device(d) container_of_const(d, struct amba_device, dev)
diff --git a/include/linux/args.h b/include/linux/args.h
index 8ff60a54eb7d..2e8e65d975c7 100644
--- a/include/linux/args.h
+++ b/include/linux/args.h
@@ -17,9 +17,9 @@
* that as _n.
*/
-/* This counts to 12. Any more, it will return 13th argument. */
-#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
-#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+/* This counts to 15. Any more and it will return the 16th argument. */
+#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _n, X...) _n
+#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
/* Concatenate two parameters, but allow them to be expanded beforehand. */
#define __CONCAT(a, b) a ## b
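For reference, a short sketch of how the widened counter behaves (expansion results shown in comments, not taken from this diff):

	#include <linux/args.h>

	/* COUNT_ARGS() now resolves correctly for up to 15 arguments */
	#define N0	COUNT_ARGS()			/* expands to 0 */
	#define N3	COUNT_ARGS(a, b, c)		/* expands to 3 */
	#define N15	COUNT_ARGS(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o)	/* 15 */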
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 083f85653716..f59099a213d0 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -115,6 +115,70 @@
/* KVM "vendor specific" services */
#define ARM_SMCCC_KVM_FUNC_FEATURES 0
#define ARM_SMCCC_KVM_FUNC_PTP 1
+/* Start of pKVM hypercall range */
+#define ARM_SMCCC_KVM_FUNC_HYP_MEMINFO 2
+#define ARM_SMCCC_KVM_FUNC_MEM_SHARE 3
+#define ARM_SMCCC_KVM_FUNC_MEM_UNSHARE 4
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_5 5
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_6 6
+#define ARM_SMCCC_KVM_FUNC_MMIO_GUARD 7
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_8 8
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_9 9
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_10 10
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_11 11
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_12 12
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_13 13
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_14 14
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_15 15
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_16 16
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_17 17
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_18 18
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_19 19
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_20 20
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_21 21
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_22 22
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_23 23
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_24 24
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_25 25
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_26 26
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_27 27
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_28 28
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_29 29
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_30 30
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_31 31
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_32 32
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_33 33
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_34 34
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_35 35
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_36 36
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_37 37
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_38 38
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_39 39
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_40 40
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_41 41
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_42 42
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_43 43
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_44 44
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_45 45
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_46 46
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_47 47
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_48 48
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_49 49
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_50 50
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_51 51
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_52 52
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_53 53
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_54 54
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_55 55
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_56 56
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_57 57
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_58 58
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_59 59
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_60 60
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_61 61
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_62 62
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_63 63
+/* End of pKVM hypercall range */
#define ARM_SMCCC_KVM_FUNC_FEATURES_2 127
#define ARM_SMCCC_KVM_NUM_FUNCS 128
@@ -137,6 +201,30 @@
ARM_SMCCC_OWNER_VENDOR_HYP, \
ARM_SMCCC_KVM_FUNC_PTP)
+#define ARM_SMCCC_VENDOR_HYP_KVM_HYP_MEMINFO_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_HYP_MEMINFO)
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_MEM_SHARE_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_MEM_SHARE)
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_MEM_UNSHARE_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_MEM_UNSHARE)
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_MMIO_GUARD)
+
/* ptp_kvm counter type ID */
#define KVM_PTP_VIRT_COUNTER 0
#define KVM_PTP_PHYS_COUNTER 1
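A hedged guest-side sketch (not from this diff) of calling one of the newly numbered pKVM services through the standard arm_smccc_1_1_invoke() conduit helper; that HYP_MEMINFO reports the share granule in the first result register is an assumption about the pKVM ABI:

	#include <linux/arm-smccc.h>

	/* Ask the hypervisor for the granule used by MEM_SHARE/MEM_UNSHARE. */
	static unsigned long pkvm_share_granule(void)
	{
		struct arm_smccc_res res;

		arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_HYP_MEMINFO_FUNC_ID,
				     0, 0, 0, &res);
		if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
			return 0;

		return res.a0;	/* granule size in bytes */
	}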
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
index 89683f31ae12..a28e2a6a13d0 100644
--- a/include/linux/arm_ffa.h
+++ b/include/linux/arm_ffa.h
@@ -73,6 +73,11 @@
#define FFA_FN64_MEM_PERM_GET FFA_SMC_64(0x88)
#define FFA_MEM_PERM_SET FFA_SMC_32(0x89)
#define FFA_FN64_MEM_PERM_SET FFA_SMC_64(0x89)
+#define FFA_CONSOLE_LOG FFA_SMC_32(0x8A)
+#define FFA_PARTITION_INFO_GET_REGS FFA_SMC_64(0x8B)
+#define FFA_EL3_INTR_HANDLE FFA_SMC_32(0x8C)
+#define FFA_MSG_SEND_DIRECT_REQ2 FFA_SMC_64(0x8D)
+#define FFA_MSG_SEND_DIRECT_RESP2 FFA_SMC_64(0x8E)
/*
* For some calls it is necessary to use SMC64 to pass or return 64-bit values.
@@ -265,6 +270,11 @@ struct ffa_indirect_msg_hdr {
u32 size;
};
+/* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP}2 which pass data via registers */
+struct ffa_send_direct_data2 {
+ unsigned long data[14]; /* x4-x17 */
+};
+
struct ffa_mem_region_addr_range {
/* The base IPA of the constituent memory region, aligned to 4 kiB */
u64 address;
@@ -426,6 +436,8 @@ struct ffa_msg_ops {
int (*sync_send_receive)(struct ffa_device *dev,
struct ffa_send_direct_data *data);
int (*indirect_send)(struct ffa_device *dev, void *buf, size_t sz);
+ int (*sync_send_receive2)(struct ffa_device *dev, const uuid_t *uuid,
+ struct ffa_send_direct_data2 *data);
};
struct ffa_mem_ops {
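An illustrative driver snippet for the new register-only direct messaging ABI (not part of this diff; the example_direct_req2() name and payload are placeholders):

	#include <linux/arm_ffa.h>

	/* Send one FFA_MSG_SEND_DIRECT_REQ2 to the service identified by @uuid
	 * and reuse @data for the register-based reply. */
	static int example_direct_req2(struct ffa_device *dev, const uuid_t *uuid)
	{
		struct ffa_send_direct_data2 data = {
			.data = { [0] = 0x1 },		/* payload starts in x4 */
		};

		if (!dev->ops->msg_ops->sync_send_receive2)
			return -EOPNOTSUPP;

		return dev->ops->msg_ops->sync_send_receive2(dev, uuid, &data);
	}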
diff --git a/include/linux/attribute_container.h b/include/linux/attribute_container.h
index e4004d1e6725..b3643de9931d 100644
--- a/include/linux/attribute_container.h
+++ b/include/linux/attribute_container.h
@@ -61,14 +61,8 @@ int attribute_container_device_trigger_safe(struct device *dev,
int (*undo)(struct attribute_container *,
struct device *,
struct device *));
-void attribute_container_trigger(struct device *dev,
- int (*fn)(struct attribute_container *,
- struct device *));
int attribute_container_add_attrs(struct device *classdev);
int attribute_container_add_class_device(struct device *classdev);
-int attribute_container_add_class_device_adapter(struct attribute_container *cont,
- struct device *dev,
- struct device *classdev);
void attribute_container_remove_attrs(struct device *classdev);
void attribute_container_class_device_del(struct device *classdev);
struct attribute_container *attribute_container_classdev_to_container(struct device *);
diff --git a/include/linux/auxiliary_bus.h b/include/linux/auxiliary_bus.h
index 662b8ae54b6a..31762324bcc9 100644
--- a/include/linux/auxiliary_bus.h
+++ b/include/linux/auxiliary_bus.h
@@ -271,6 +271,6 @@ void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv);
struct auxiliary_device *auxiliary_find_device(struct device *start,
const void *data,
- int (*match)(struct device *dev, const void *data));
+ device_match_t match);
#endif /* _AUXILIARY_BUS_H_ */
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h
index 68da8dba5162..dba41b65ae0d 100644
--- a/include/linux/bcma/bcma_driver_pci.h
+++ b/include/linux/bcma/bcma_driver_pci.h
@@ -203,7 +203,7 @@ struct pci_dev;
#define BCMA_CORE_PCI_MDIO_RXCTRL0 0x840
/* PCIE Root Capability Register bits (Host mode only) */
-#define BCMA_CORE_PCI_RC_CRS_VISIBILITY 0x0001
+#define BCMA_CORE_PCI_RC_RRS_VISIBILITY 0x0001
struct bcma_drv_pci;
struct bcma_bus;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index a46e2047bea4..faceadb040f9 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -324,8 +324,8 @@ static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
void bio_trim(struct bio *bio, sector_t offset, sector_t size);
extern struct bio *bio_split(struct bio *bio, int sectors,
gfp_t gfp, struct bio_set *bs);
-struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
- unsigned *segs, struct bio_set *bs, unsigned max_bytes);
+int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim,
+ unsigned *segs, unsigned max_bytes);
/**
* bio_next_split - get next @sectors from a bio, splitting if necessary
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index d3b66d77df7a..262b6596eca5 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -203,12 +203,12 @@ unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
* the bit offsets of all zero areas this function finds are multiples of that
* power of 2. A @align_mask of 0 means no alignment is required.
*/
-static inline unsigned long
-bitmap_find_next_zero_area(unsigned long *map,
- unsigned long size,
- unsigned long start,
- unsigned int nr,
- unsigned long align_mask)
+static __always_inline
+unsigned long bitmap_find_next_zero_area(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask)
{
return bitmap_find_next_zero_area_off(map, size, start, nr,
align_mask, 0);
@@ -228,7 +228,7 @@ void bitmap_fold(unsigned long *dst, const unsigned long *orig,
#define bitmap_size(nbits) (ALIGN(nbits, BITS_PER_LONG) / BITS_PER_BYTE)
-static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
+static __always_inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
unsigned int len = bitmap_size(nbits);
@@ -238,7 +238,7 @@ static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
memset(dst, 0, len);
}
-static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
+static __always_inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
unsigned int len = bitmap_size(nbits);
@@ -248,8 +248,8 @@ static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
memset(dst, 0xff, len);
}
-static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
- unsigned int nbits)
+static __always_inline
+void bitmap_copy(unsigned long *dst, const unsigned long *src, unsigned int nbits)
{
unsigned int len = bitmap_size(nbits);
@@ -262,8 +262,8 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
/*
* Copy bitmap and clear tail bits in last word.
*/
-static inline void bitmap_copy_clear_tail(unsigned long *dst,
- const unsigned long *src, unsigned int nbits)
+static __always_inline
+void bitmap_copy_clear_tail(unsigned long *dst, const unsigned long *src, unsigned int nbits)
{
bitmap_copy(dst, src, nbits);
if (nbits % BITS_PER_LONG)
@@ -318,16 +318,18 @@ void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits);
bitmap_copy_clear_tail((unsigned long *)(buf), (const unsigned long *)(bitmap), (nbits))
#endif
-static inline bool bitmap_and(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+bool bitmap_and(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0;
return __bitmap_and(dst, src1, src2, nbits);
}
-static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+void bitmap_or(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = *src1 | *src2;
@@ -335,8 +337,9 @@ static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
__bitmap_or(dst, src1, src2, nbits);
}
-static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+void bitmap_xor(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = *src1 ^ *src2;
@@ -344,16 +347,17 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
__bitmap_xor(dst, src1, src2, nbits);
}
-static inline bool bitmap_andnot(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+bool bitmap_andnot(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return (*dst = *src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
return __bitmap_andnot(dst, src1, src2, nbits);
}
-static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
- unsigned int nbits)
+static __always_inline
+void bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = ~(*src);
@@ -368,8 +372,8 @@ static inline void bitmap_complement(unsigned long *dst, const unsigned long *sr
#endif
#define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)
-static inline bool bitmap_equal(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+bool bitmap_equal(const unsigned long *src1, const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
@@ -388,10 +392,9 @@ static inline bool bitmap_equal(const unsigned long *src1,
*
* Returns: True if (*@src1 | *@src2) == *@src3, false otherwise
*/
-static inline bool bitmap_or_equal(const unsigned long *src1,
- const unsigned long *src2,
- const unsigned long *src3,
- unsigned int nbits)
+static __always_inline
+bool bitmap_or_equal(const unsigned long *src1, const unsigned long *src2,
+ const unsigned long *src3, unsigned int nbits)
{
if (!small_const_nbits(nbits))
return __bitmap_or_equal(src1, src2, src3, nbits);
@@ -399,9 +402,8 @@ static inline bool bitmap_or_equal(const unsigned long *src1,
return !(((*src1 | *src2) ^ *src3) & BITMAP_LAST_WORD_MASK(nbits));
}
-static inline bool bitmap_intersects(const unsigned long *src1,
- const unsigned long *src2,
- unsigned int nbits)
+static __always_inline
+bool bitmap_intersects(const unsigned long *src1, const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
@@ -409,8 +411,8 @@ static inline bool bitmap_intersects(const unsigned long *src1,
return __bitmap_intersects(src1, src2, nbits);
}
-static inline bool bitmap_subset(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+bool bitmap_subset(const unsigned long *src1, const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
@@ -418,7 +420,8 @@ static inline bool bitmap_subset(const unsigned long *src1,
return __bitmap_subset(src1, src2, nbits);
}
-static inline bool bitmap_empty(const unsigned long *src, unsigned nbits)
+static __always_inline
+bool bitmap_empty(const unsigned long *src, unsigned nbits)
{
if (small_const_nbits(nbits))
return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
@@ -426,7 +429,8 @@ static inline bool bitmap_empty(const unsigned long *src, unsigned nbits)
return find_first_bit(src, nbits) == nbits;
}
-static inline bool bitmap_full(const unsigned long *src, unsigned int nbits)
+static __always_inline
+bool bitmap_full(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
@@ -460,8 +464,8 @@ unsigned long bitmap_weight_andnot(const unsigned long *src1,
return __bitmap_weight_andnot(src1, src2, nbits);
}
-static __always_inline void bitmap_set(unsigned long *map, unsigned int start,
- unsigned int nbits)
+static __always_inline
+void bitmap_set(unsigned long *map, unsigned int start, unsigned int nbits)
{
if (__builtin_constant_p(nbits) && nbits == 1)
__set_bit(start, map);
@@ -476,8 +480,8 @@ static __always_inline void bitmap_set(unsigned long *map, unsigned int start,
__bitmap_set(map, start, nbits);
}
-static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
- unsigned int nbits)
+static __always_inline
+void bitmap_clear(unsigned long *map, unsigned int start, unsigned int nbits)
{
if (__builtin_constant_p(nbits) && nbits == 1)
__clear_bit(start, map);
@@ -492,8 +496,9 @@ static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
__bitmap_clear(map, start, nbits);
}
-static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
- unsigned int shift, unsigned int nbits)
+static __always_inline
+void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
@@ -501,8 +506,9 @@ static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *s
__bitmap_shift_right(dst, src, shift, nbits);
}
-static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *src,
- unsigned int shift, unsigned int nbits)
+static __always_inline
+void bitmap_shift_left(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src << shift) & BITMAP_LAST_WORD_MASK(nbits);
@@ -510,11 +516,12 @@ static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *sr
__bitmap_shift_left(dst, src, shift, nbits);
}
-static inline void bitmap_replace(unsigned long *dst,
- const unsigned long *old,
- const unsigned long *new,
- const unsigned long *mask,
- unsigned int nbits)
+static __always_inline
+void bitmap_replace(unsigned long *dst,
+ const unsigned long *old,
+ const unsigned long *new,
+ const unsigned long *mask,
+ unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*old & ~(*mask)) | (*new & *mask);
@@ -557,8 +564,9 @@ static inline void bitmap_replace(unsigned long *dst,
* bitmap_gather() can be seen as the 'reverse' bitmap_scatter() operation.
* See bitmap_scatter() for details related to this relationship.
*/
-static inline void bitmap_scatter(unsigned long *dst, const unsigned long *src,
- const unsigned long *mask, unsigned int nbits)
+static __always_inline
+void bitmap_scatter(unsigned long *dst, const unsigned long *src,
+ const unsigned long *mask, unsigned int nbits)
{
unsigned int n = 0;
unsigned int bit;
@@ -611,8 +619,9 @@ static inline void bitmap_scatter(unsigned long *dst, const unsigned long *src,
* bitmap_scatter(res, src, mask, n) and a call to
* bitmap_scatter(res, result, mask, n) will lead to the same res value.
*/
-static inline void bitmap_gather(unsigned long *dst, const unsigned long *src,
- const unsigned long *mask, unsigned int nbits)
+static __always_inline
+void bitmap_gather(unsigned long *dst, const unsigned long *src,
+ const unsigned long *mask, unsigned int nbits)
{
unsigned int n = 0;
unsigned int bit;
@@ -623,9 +632,9 @@ static inline void bitmap_gather(unsigned long *dst, const unsigned long *src,
__assign_bit(n++, dst, test_bit(bit, src));
}
-static inline void bitmap_next_set_region(unsigned long *bitmap,
- unsigned int *rs, unsigned int *re,
- unsigned int end)
+static __always_inline
+void bitmap_next_set_region(unsigned long *bitmap, unsigned int *rs,
+ unsigned int *re, unsigned int end)
{
*rs = find_next_bit(bitmap, end, *rs);
*re = find_next_zero_bit(bitmap, end, *rs + 1);
@@ -640,7 +649,8 @@ static inline void bitmap_next_set_region(unsigned long *bitmap,
* This is the complement to __bitmap_find_free_region() and releases
* the found region (by clearing it in the bitmap).
*/
-static inline void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
+static __always_inline
+void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
{
bitmap_clear(bitmap, pos, BIT(order));
}
@@ -656,7 +666,8 @@ static inline void bitmap_release_region(unsigned long *bitmap, unsigned int pos
* Returns: 0 on success, or %-EBUSY if specified region wasn't
* free (not all bits were zero).
*/
-static inline int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
+static __always_inline
+int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
{
unsigned int len = BIT(order);
@@ -680,7 +691,8 @@ static inline int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos
* Returns: the bit offset in bitmap of the allocated region,
* or -errno on failure.
*/
-static inline int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
+static __always_inline
+int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
{
unsigned int pos, end; /* scans bitmap by regions of size order */
@@ -734,7 +746,7 @@ static inline int bitmap_find_free_region(unsigned long *bitmap, unsigned int bi
* That is ``(u32 *)(&val)[0]`` gets the upper 32 bits,
* but we expect the lower 32-bits of u64.
*/
-static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
+static __always_inline void bitmap_from_u64(unsigned long *dst, u64 mask)
{
bitmap_from_arr64(dst, &mask, 64);
}
@@ -749,9 +761,8 @@ static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
* @map memory region. For @nbits = 0 and @nbits > BITS_PER_LONG the return
* value is undefined.
*/
-static inline unsigned long bitmap_read(const unsigned long *map,
- unsigned long start,
- unsigned long nbits)
+static __always_inline
+unsigned long bitmap_read(const unsigned long *map, unsigned long start, unsigned long nbits)
{
size_t index = BIT_WORD(start);
unsigned long offset = start % BITS_PER_LONG;
@@ -784,8 +795,9 @@ static inline unsigned long bitmap_read(const unsigned long *map,
*
* For @nbits == 0 and @nbits > BITS_PER_LONG no writes are performed.
*/
-static inline void bitmap_write(unsigned long *map, unsigned long value,
- unsigned long start, unsigned long nbits)
+static __always_inline
+void bitmap_write(unsigned long *map, unsigned long value,
+ unsigned long start, unsigned long nbits)
{
size_t index;
unsigned long offset;
diff --git a/include/linux/bits.h b/include/linux/bits.h
index 0eb24d21aac2..60044b608817 100644
--- a/include/linux/bits.h
+++ b/include/linux/bits.h
@@ -36,4 +36,19 @@
#define GENMASK_ULL(h, l) \
(GENMASK_INPUT_CHECK(h, l) + __GENMASK_ULL(h, l))
+#if !defined(__ASSEMBLY__)
+/*
+ * Missing asm support
+ *
+ * __GENMASK_U128() depends on _BIT128() which would not work
+ * in the asm code, as it shifts an 'unsigned __int128' data
+ * type instead of direct representation of 128 bit constants
+ * such as long and unsigned long. The fundamental problem is
+ * that a 128 bit constant will get silently truncated by the
+ * gcc compiler.
+ */
+#define GENMASK_U128(h, l) \
+ (GENMASK_INPUT_CHECK(h, l) + __GENMASK_U128(h, l))
+#endif
+
#endif /* __LINUX_BITS_H */
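A small usage sketch (not from this diff); the EXAMPLE_* names are hypothetical and only show that the new macro mirrors GENMASK()/GENMASK_ULL() for fields spanning more than 64 bits, C code only:

	#include <linux/bits.h>

	#define EXAMPLE_UPPER_HALF	GENMASK_U128(127, 64)	/* bits 127..64 set */
	#define EXAMPLE_BYTE0		GENMASK_U128(7, 0)	/* 0xff */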
diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h
index de98049b7ded..676f8f860c47 100644
--- a/include/linux/blk-integrity.h
+++ b/include/linux/blk-integrity.h
@@ -25,9 +25,10 @@ static inline bool queue_limits_stack_integrity_bdev(struct queue_limits *t,
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
-int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
- struct scatterlist *);
+int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
+int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
+ ssize_t bytes, u32 seed);
static inline bool
blk_integrity_queue_supports_integrity(struct request_queue *q)
@@ -96,12 +97,18 @@ static inline int blk_rq_count_integrity_sg(struct request_queue *q,
{
return 0;
}
-static inline int blk_rq_map_integrity_sg(struct request_queue *q,
- struct bio *b,
+static inline int blk_rq_map_integrity_sg(struct request *q,
struct scatterlist *s)
{
return 0;
}
+static inline int blk_rq_integrity_map_user(struct request *rq,
+ void __user *ubuf,
+ ssize_t bytes,
+ u32 seed)
+{
+ return -EINVAL;
+}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
return NULL;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 8d304b1d16b1..4fecf46ef681 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -149,10 +149,7 @@ struct request {
* physical address coalescing is performed.
*/
unsigned short nr_phys_segments;
-
-#ifdef CONFIG_BLK_DEV_INTEGRITY
unsigned short nr_integrity_segments;
-#endif
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
struct bio_crypt_ctx *crypt_ctx;
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 36ed96133217..dce7615c35e7 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -71,6 +71,9 @@ struct block_device {
struct partition_meta_info *bd_meta_info;
int bd_writers;
+#ifdef CONFIG_SECURITY
+ void *bd_security;
+#endif
/*
* keep this out-of-line as it's both big and not needed in the fast
* path
@@ -248,11 +251,9 @@ struct bio {
struct bio_crypt_ctx *bi_crypt_context;
#endif
- union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
- struct bio_integrity_payload *bi_integrity; /* data integrity */
+ struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
- };
unsigned short bi_vcnt; /* how many bio_vec's */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b7664d593486..50c3b959da28 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -968,8 +968,6 @@ static inline void blk_queue_disable_write_zeroes(struct request_queue *q)
/*
* Access functions for manipulating queue properties
*/
-extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
-extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
@@ -1187,7 +1185,8 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q)
return q->limits.max_segment_size;
}
-static inline unsigned int queue_limits_max_zone_append_sectors(struct queue_limits *l)
+static inline unsigned int
+queue_limits_max_zone_append_sectors(const struct queue_limits *l)
{
unsigned int max_sectors = min(l->chunk_sectors, l->max_hw_sectors);
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 3b94ec161e8c..19d8ca8ac960 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -294,6 +294,7 @@ struct bpf_map {
* same prog type, JITed flag and xdp_has_frags flag.
*/
struct {
+ const struct btf_type *attach_func_proto;
spinlock_t lock;
enum bpf_prog_type type;
bool jited;
@@ -694,6 +695,11 @@ enum bpf_type_flag {
/* DYNPTR points to xdp_buff */
DYNPTR_TYPE_XDP = BIT(16 + BPF_BASE_TYPE_BITS),
+ /* Memory must be aligned on some architectures, used in combination with
+ * MEM_FIXED_SIZE.
+ */
+ MEM_ALIGNED = BIT(17 + BPF_BASE_TYPE_BITS),
+
__BPF_TYPE_FLAG_MAX,
__BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1,
};
@@ -731,8 +737,6 @@ enum bpf_arg_type {
ARG_ANYTHING, /* any (initialized) argument is ok */
ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */
ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
- ARG_PTR_TO_INT, /* pointer to int */
- ARG_PTR_TO_LONG, /* pointer to long */
ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */
ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */
ARG_PTR_TO_RINGBUF_MEM, /* pointer to dynamically reserved ringbuf memory */
@@ -743,7 +747,7 @@ enum bpf_arg_type {
ARG_PTR_TO_STACK, /* pointer to stack */
ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */
ARG_PTR_TO_TIMER, /* pointer to bpf_timer */
- ARG_PTR_TO_KPTR, /* pointer to referenced kptr */
+ ARG_KPTR_XCHG_DEST, /* pointer to destination that kptrs are bpf_kptr_xchg'd into */
ARG_PTR_TO_DYNPTR, /* pointer to bpf_dynptr. See bpf_type_flag for dynptr type */
__BPF_ARG_TYPE_MAX,
@@ -807,6 +811,12 @@ struct bpf_func_proto {
bool gpl_only;
bool pkt_access;
bool might_sleep;
+ /* set to true if helper follows contract for llvm
+ * attribute bpf_fastcall:
+ * - void functions do not scratch r0
+ * - functions taking N arguments scratch only registers r1-rN
+ */
+ bool allow_fastcall;
enum bpf_return_type ret_type;
union {
struct {
@@ -919,6 +929,7 @@ static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
*/
struct bpf_insn_access_aux {
enum bpf_reg_type reg_type;
+ bool is_ldsx;
union {
int ctx_field_size;
struct {
@@ -927,6 +938,7 @@ struct bpf_insn_access_aux {
};
};
struct bpf_verifier_log *log; /* for verbose logs */
+ bool is_retval; /* is accessing function return value ? */
};
static inline void
@@ -965,6 +977,8 @@ struct bpf_verifier_ops {
struct bpf_insn_access_aux *info);
int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
const struct bpf_prog *prog);
+ int (*gen_epilogue)(struct bpf_insn *insn, const struct bpf_prog *prog,
+ s16 ctx_stack_off);
int (*gen_ld_abs)(const struct bpf_insn *orig,
struct bpf_insn *insn_buf);
u32 (*convert_ctx_access)(enum bpf_access_type type,
@@ -1795,6 +1809,7 @@ struct bpf_struct_ops_common_value {
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
+int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
void *value);
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
@@ -1851,6 +1866,10 @@ static inline void bpf_module_put(const void *data, struct module *owner)
{
module_put(owner);
}
+static inline int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff)
+{
+ return -ENOTSUPP;
+}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
void *key,
void *value)
@@ -2227,7 +2246,16 @@ void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
-struct bpf_map *__bpf_map_get(struct fd f);
+
+static inline struct bpf_map *__bpf_map_get(struct fd f)
+{
+ if (fd_empty(f))
+ return ERR_PTR(-EBADF);
+ if (unlikely(fd_file(f)->f_op != &bpf_map_fops))
+ return ERR_PTR(-EINVAL);
+ return fd_file(f)->private_data;
+}
+
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref);
@@ -3184,7 +3212,9 @@ extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
+extern const struct bpf_func_proto bpf_get_stack_sleepable_proto;
extern const struct bpf_func_proto bpf_get_task_stack_proto;
+extern const struct bpf_func_proto bpf_get_task_stack_sleepable_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
extern const struct bpf_func_proto bpf_get_stack_proto_pe;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
@@ -3192,6 +3222,7 @@ extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto;
+extern const struct bpf_func_proto bpf_current_task_under_cgroup_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
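A sketch of the caller pattern the now-inline __bpf_map_get() is meant for, similar in spirit to how bpf_map_get() consumes it; it assumes the scope-based fd class from <linux/file.h> and the example_map_get() name is hypothetical:

	#include <linux/bpf.h>
	#include <linux/file.h>

	/* Resolve a user-supplied map fd and take a reference that outlives it. */
	static struct bpf_map *example_map_get(u32 ufd)
	{
		CLASS(fd, f)(ufd);			/* fdput() runs on scope exit */
		struct bpf_map *map = __bpf_map_get(f);

		if (!IS_ERR(map))
			bpf_map_inc(map);

		return map;
	}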
diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h
index 1de7ece5d36d..aefcd6564251 100644
--- a/include/linux/bpf_lsm.h
+++ b/include/linux/bpf_lsm.h
@@ -9,6 +9,7 @@
#include <linux/sched.h>
#include <linux/bpf.h>
+#include <linux/bpf_verifier.h>
#include <linux/lsm_hooks.h>
#ifdef CONFIG_BPF_LSM
@@ -45,6 +46,8 @@ void bpf_inode_storage_free(struct inode *inode);
void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, bpf_func_t *bpf_func);
+int bpf_lsm_get_retval_range(const struct bpf_prog *prog,
+ struct bpf_retval_range *range);
#else /* !CONFIG_BPF_LSM */
static inline bool bpf_lsm_is_sleepable_hook(u32 btf_id)
@@ -78,6 +81,11 @@ static inline void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog,
{
}
+static inline int bpf_lsm_get_retval_range(const struct bpf_prog *prog,
+ struct bpf_retval_range *range)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_BPF_LSM */
#endif /* _LINUX_BPF_LSM_H */
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 7b776dae36e5..4513372c5bc8 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -23,6 +23,8 @@
* (in the "-8,-16,...,-512" form)
*/
#define TMP_STR_BUF_LEN 320
+/* Patch buffer size */
+#define INSN_BUF_SIZE 32
/* Liveness marks, used for registers and spilled-regs (in stack slots).
* Read marks propagate upwards until they find a write mark; they record that
@@ -371,6 +373,10 @@ struct bpf_jmp_history_entry {
u32 prev_idx : 22;
/* special flags, e.g., whether insn is doing register stack spill/load */
u32 flags : 10;
+ /* additional registers that need precision tracking when this
+ * jump is backtracked, vector of six 10-bit records
+ */
+ u64 linked_regs;
};
/* Maximum number of register states that can exist at once */
@@ -572,6 +578,14 @@ struct bpf_insn_aux_data {
bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
u8 alu_state; /* used in combination with alu_limit */
+ /* true if STX or LDX instruction is a part of a spill/fill
+ * pattern for a bpf_fastcall call.
+ */
+ u8 fastcall_pattern:1;
+ /* for CALL instructions, a number of spill/fill pairs in the
+ * bpf_fastcall pattern.
+ */
+ u8 fastcall_spills_num:3;
/* below fields are initialized once */
unsigned int orig_idx; /* original instruction index */
@@ -641,6 +655,10 @@ struct bpf_subprog_info {
u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
u16 stack_depth; /* max. stack depth used by this function */
u16 stack_extra;
+ /* offsets in range [stack_depth .. fastcall_stack_off)
+ * are used for bpf_fastcall spills and fills.
+ */
+ s16 fastcall_stack_off;
bool has_tail_call: 1;
bool tail_call_reachable: 1;
bool has_ld_abs: 1;
@@ -648,6 +666,8 @@ struct bpf_subprog_info {
bool is_async_cb: 1;
bool is_exception_cb: 1;
bool args_cached: 1;
+ /* true if bpf_fastcall stack region is used by functions that can't be inlined */
+ bool keep_fastcall_stack: 1;
u8 arg_cnt;
struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS];
@@ -762,6 +782,8 @@ struct bpf_verifier_env {
* e.g., in reg_type_str() to generate reg_type string
*/
char tmp_str_buf[TMP_STR_BUF_LEN];
+ struct bpf_insn insn_buf[INSN_BUF_SIZE];
+ struct bpf_insn epilogue_buf[INSN_BUF_SIZE];
};
static inline struct bpf_func_info_aux *subprog_aux(struct bpf_verifier_env *env, int subprog)
@@ -905,6 +927,11 @@ static inline bool type_is_sk_pointer(enum bpf_reg_type type)
type == PTR_TO_XDP_SOCK;
}
+static inline bool type_may_be_null(u32 type)
+{
+ return type & PTR_MAYBE_NULL;
+}
+
static inline void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
{
env->scratched_regs |= 1U << regno;
diff --git a/include/linux/btf.h b/include/linux/btf.h
index cffb43133c68..b8a583194c4a 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -580,6 +580,7 @@ bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_type);
bool btf_types_are_same(const struct btf *btf1, u32 id1,
const struct btf *btf2, u32 id2);
+int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx);
#else
static inline const struct btf_type *btf_type_by_id(const struct btf *btf,
u32 type_id)
@@ -654,6 +655,10 @@ static inline bool btf_types_are_same(const struct btf *btf1, u32 id1,
{
return false;
}
+static inline int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx)
+{
+ return -EOPNOTSUPP;
+}
#endif
static inline bool btf_type_is_struct_ptr(struct btf *btf, const struct btf_type *t)
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 14acf1bbe0ce..932139c5d46f 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -199,8 +199,7 @@ void folio_set_bh(struct buffer_head *bh, struct folio *folio,
unsigned long offset);
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
gfp_t gfp);
-struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
- bool retry);
+struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size);
struct buffer_head *create_empty_buffers(struct folio *folio,
unsigned long blocksize, unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
@@ -258,18 +257,18 @@ int __block_write_full_folio(struct inode *inode, struct folio *folio,
int block_read_full_folio(struct folio *, get_block_t *);
bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
- struct page **pagep, get_block_t *get_block);
-int __block_write_begin(struct page *page, loff_t pos, unsigned len,
+ struct folio **foliop, get_block_t *get_block);
+int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
- loff_t, unsigned, unsigned,
- struct page *, void *);
+ loff_t, unsigned len, unsigned copied,
+ struct folio *, void *);
int generic_write_end(struct file *, struct address_space *,
- loff_t, unsigned, unsigned,
- struct page *, void *);
+ loff_t, unsigned len, unsigned copied,
+ struct folio *, void *);
void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to);
int cont_write_begin(struct file *, struct address_space *, loff_t,
- unsigned, struct page **, void **,
+ unsigned, struct folio **, void **,
get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
void block_commit_write(struct page *page, unsigned int from, unsigned int to);
diff --git a/include/linux/buildid.h b/include/linux/buildid.h
index 20aa3c2d89f7..014a88c41073 100644
--- a/include/linux/buildid.h
+++ b/include/linux/buildid.h
@@ -7,8 +7,8 @@
#define BUILD_ID_SIZE_MAX 20
struct vm_area_struct;
-int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
- __u32 *size);
+int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size);
+int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size);
int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size);
#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_VMCORE_INFO)
diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h
index 04f3ace5787b..8fc1aed64113 100644
--- a/include/linux/ceph/decode.h
+++ b/include/linux/ceph/decode.h
@@ -6,7 +6,7 @@
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/time.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/ceph/types.h>
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 4497d0a6772c..15fb566d3f46 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -4,7 +4,7 @@
#include <linux/ceph/ceph_debug.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/backing-dev.h>
#include <linux/completion.h>
#include <linux/exportfs.h>
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index f66f6aac74f6..d7941478158c 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -449,8 +449,6 @@ extern int ceph_osdc_init(struct ceph_osd_client *osdc,
extern void ceph_osdc_stop(struct ceph_osd_client *osdc);
extern void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc);
-extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc,
- struct ceph_msg *msg);
extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
struct ceph_msg *msg);
void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb);
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index ae04035b6cbe..47ae4c4d924c 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -172,7 +172,11 @@ struct cgroup_subsys_state {
/* reference count - access via css_[try]get() and css_put() */
struct percpu_ref refcnt;
- /* siblings list anchored at the parent's ->children */
+ /*
+ * siblings list anchored at the parent's ->children
+ *
+ * linkage is protected by cgroup_mutex or RCU
+ */
struct list_head sibling;
struct list_head children;
@@ -210,6 +214,14 @@ struct cgroup_subsys_state {
* fields of the containing structure.
*/
struct cgroup_subsys_state *parent;
+
+ /*
+ * Keep track of total numbers of visible descendant CSSes.
+ * The total number of dying CSSes is tracked in
+ * css->cgroup->nr_dying_subsys[ssid].
+ * Protected by cgroup_mutex.
+ */
+ int nr_descendants;
};
/*
@@ -470,6 +482,12 @@ struct cgroup {
/* Private pointers for each registered subsystem */
struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
+ /*
+ * Keep track of total number of dying CSSes at and below this cgroup.
+ * Protected by cgroup_mutex.
+ */
+ int nr_dying_subsys[CGROUP_SUBSYS_COUNT];
+
struct cgroup_root *root;
/*
@@ -775,6 +793,11 @@ struct cgroup_subsys {
extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
+struct cgroup_of_peak {
+ unsigned long value;
+ struct list_head list;
+};
+
/**
* cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
* @tsk: target task
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index c60ba0ab1462..f8ef47f8a634 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -11,6 +11,7 @@
#include <linux/sched.h>
#include <linux/nodemask.h>
+#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
@@ -28,8 +29,6 @@
struct kernel_clone_args;
-#ifdef CONFIG_CGROUPS
-
/*
* All weight knobs on the default hierarchy should use the following min,
* default and max values. The default value is the logarithmic center of
@@ -39,6 +38,8 @@ struct kernel_clone_args;
#define CGROUP_WEIGHT_DFL 100
#define CGROUP_WEIGHT_MAX 10000
+#ifdef CONFIG_CGROUPS
+
enum {
CSS_TASK_ITER_PROCS = (1U << 0), /* walk only threadgroup leaders */
CSS_TASK_ITER_THREADED = (1U << 1), /* walk all threaded css_sets in the domain */
@@ -854,4 +855,6 @@ static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
struct cgroup *task_get_cgroup1(struct task_struct *tsk, int hierarchy_id);
+struct cgroup_of_peak *of_peak(struct kernfs_open_file *of);
+
#endif /* _LINUX_CGROUP_H */
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
index d9e613803df1..038b2d523bf8 100644
--- a/include/linux/cleanup.h
+++ b/include/linux/cleanup.h
@@ -4,6 +4,142 @@
#include <linux/compiler.h>
+/**
+ * DOC: scope-based cleanup helpers
+ *
+ * The "goto error" pattern is notorious for introducing subtle resource
+ * leaks. It is tedious and error prone to add new resource acquisition
+ * constraints into code paths that already have several unwind
+ * conditions. The "cleanup" helpers enable the compiler to help with
+ * this tedium and can aid in maintaining LIFO (last in first out)
+ * unwind ordering to avoid unintentional leaks.
+ *
+ * As drivers make up the majority of the kernel code base, here is an
+ * example of using these helpers to clean up PCI drivers. The target of
+ * the cleanups are occasions where a goto is used to unwind a device
+ * reference (pci_dev_put()), or unlock the device (pci_dev_unlock())
+ * before returning.
+ *
+ * The DEFINE_FREE() macro can arrange for PCI device references to be
+ * dropped when the associated variable goes out of scope::
+ *
+ * DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
+ * ...
+ * struct pci_dev *dev __free(pci_dev_put) =
+ * pci_get_slot(parent, PCI_DEVFN(0, 0));
+ *
+ * The above will automatically call pci_dev_put() if @dev is non-NULL
+ * when @dev goes out of scope (automatic variable scope). If a function
+ * wants to invoke pci_dev_put() on error, but return @dev (i.e. without
+ * freeing it) on success, it can do::
+ *
+ * return no_free_ptr(dev);
+ *
+ * ...or::
+ *
+ * return_ptr(dev);
+ *
+ * The DEFINE_GUARD() macro can arrange for the PCI device lock to be
+ * dropped when the scope where guard() is invoked ends::
+ *
+ * DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
+ * ...
+ * guard(pci_dev)(dev);
+ *
+ * The lifetime of the lock obtained by the guard() helper follows the
+ * scope of automatic variable declaration. Take the following example::
+ *
+ * func(...)
+ * {
+ * if (...) {
+ * ...
+ * guard(pci_dev)(dev); // pci_dev_lock() invoked here
+ * ...
+ * } // <- implied pci_dev_unlock() triggered here
+ * }
+ *
+ * Observe the lock is held for the remainder of the "if ()" block not
+ * the remainder of "func()".
+ *
+ * Now, when a function uses both __free() and guard(), or multiple
+ * instances of __free(), the LIFO order of variable definition order
+ * matters. GCC documentation says:
+ *
+ * "When multiple variables in the same scope have cleanup attributes,
+ * at exit from the scope their associated cleanup functions are run in
+ * reverse order of definition (last defined, first cleanup)."
+ *
+ * When the unwind order matters it requires that variables be defined
+ * mid-function scope rather than at the top of the function. Take the
+ * following example and notice the bug highlighted by "!!"::
+ *
+ * LIST_HEAD(list);
+ * DEFINE_MUTEX(lock);
+ *
+ * struct object {
+ * struct list_head node;
+ * };
+ *
+ * static struct object *alloc_add(void)
+ * {
+ * struct object *obj;
+ *
+ * lockdep_assert_held(&lock);
+ * obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ * if (obj) {
+ * INIT_LIST_HEAD(&obj->node);
+ * list_add(&obj->node, &list);
+ * }
+ * return obj;
+ * }
+ *
+ * static void remove_free(struct object *obj)
+ * {
+ * lockdep_assert_held(&lock);
+ * list_del(&obj->node);
+ * kfree(obj);
+ * }
+ *
+ * DEFINE_FREE(remove_free, struct object *, if (_T) remove_free(_T))
+ * static int init(void)
+ * {
+ * struct object *obj __free(remove_free) = NULL;
+ * int err;
+ *
+ * guard(mutex)(&lock);
+ * obj = alloc_add();
+ *
+ * if (!obj)
+ * return -ENOMEM;
+ *
+ * err = other_init(obj);
+ * if (err)
+ * return err; // remove_free() called without the lock!!
+ *
+ * no_free_ptr(obj);
+ * return 0;
+ * }
+ *
+ * That bug is fixed by changing init() to call guard() and define +
+ * initialize @obj in this order::
+ *
+ * guard(mutex)(&lock);
+ * struct object *obj __free(remove_free) = alloc_add();
+ *
+ * Given that the "__free(...) = NULL" pattern for variables defined at
+ * the top of the function poses this potential interdependency problem
+ * the recommendation is to always define and assign variables in one
+ * statement and not group variable definitions at the top of the
+ * function when __free() is used.
+ *
+ * Lastly, given that the benefit of cleanup helpers is removal of
+ * "goto", and that the "goto" statement can jump between scopes, the
+ * expectation is that usage of "goto" and cleanup helpers is never
+ * mixed in the same function. I.e. for a given routine, convert all
+ * resources that need a "goto" cleanup to scope-based cleanup, or
+ * convert none of them.
+ */
+
/*
* DEFINE_FREE(name, type, free):
* simple helper macro that defines the required wrapper for a __free()
@@ -98,7 +234,7 @@ const volatile void * __must_check_fn(const volatile void *val)
* DEFINE_CLASS(fdget, struct fd, fdput(_T), fdget(fd), int fd)
*
* CLASS(fdget, f)(fd);
- * if (!f.file)
+ * if (!fd_file(f))
* return -EBADF;
*
* // use 'f' without concern
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 4a537260f655..7e43caabb54b 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -394,6 +394,20 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
__clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), NULL, \
NULL, (flags), (fixed_rate), 0, 0, true)
/**
+ * devm_clk_hw_register_fixed_rate_parent_data - register fixed-rate clock with
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define devm_clk_hw_register_fixed_rate_parent_data(dev, name, parent_data, flags, \
+ fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
+ (parent_data), (flags), (fixed_rate), 0, \
+ 0, true)
+/**
* clk_hw_register_fixed_rate_parent_hw - register fixed-rate clock with
* the clock framework
* @dev: device that is registering this clock
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 0fa56d672532..851a0f2cf42c 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -641,6 +641,32 @@ struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id);
struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id);
/**
+ * devm_clk_get_optional_enabled_with_rate - devm_clk_get_optional() +
+ * clk_set_rate() +
+ * clk_prepare_enable()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ * @rate: new clock rate
+ *
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. If no such clk is found, it returns NULL
+ * which serves as a dummy clk. That's the only difference compared
+ * to devm_clk_get_enabled().
+ *
+ * The returned clk (if valid) is prepared and enabled, and its rate has been set.
+ *
+ * The clock will automatically be disabled, unprepared and freed
+ * when the device is unbound from the bus.
+ */
+struct clk *devm_clk_get_optional_enabled_with_rate(struct device *dev,
+ const char *id,
+ unsigned long rate);
+
+/**
* devm_get_clk_from_child - lookup and obtain a managed reference to a
* clock producer from child node.
* @dev: device for clock "consumer"
@@ -982,6 +1008,13 @@ static inline struct clk *devm_clk_get_optional_enabled(struct device *dev,
return NULL;
}
+static inline struct clk *
+devm_clk_get_optional_enabled_with_rate(struct device *dev, const char *id,
+ unsigned long rate)
+{
+ return NULL;
+}
+
static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
struct clk_bulk_data *clks)
{
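A hypothetical consumer of the new helper (not part of this diff; the example_probe() shape and the "core" clock name are illustrative):

	#include <linux/clk.h>
	#include <linux/platform_device.h>

	/* An optional "core" clock that, when described in the DT, must run at
	 * 100 MHz for the lifetime of the binding. */
	static int example_probe(struct platform_device *pdev)
	{
		struct clk *clk;

		clk = devm_clk_get_optional_enabled_with_rate(&pdev->dev, "core",
							      100000000);
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		/* clk == NULL just means the optional clock was not provided */
		return 0;
	}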
diff --git a/include/linux/closure.h b/include/linux/closure.h
index 2af44427107d..880fe85e35e9 100644
--- a/include/linux/closure.h
+++ b/include/linux/closure.h
@@ -454,4 +454,39 @@ do { \
__closure_wait_event(waitlist, _cond); \
} while (0)
+#define __closure_wait_event_timeout(waitlist, _cond, _until) \
+({ \
+ struct closure cl; \
+ long _t; \
+ \
+ closure_init_stack(&cl); \
+ \
+ while (1) { \
+ closure_wait(waitlist, &cl); \
+ if (_cond) { \
+ _t = max_t(long, 1L, _until - jiffies); \
+ break; \
+ } \
+ _t = max_t(long, 0L, _until - jiffies); \
+ if (!_t) \
+ break; \
+ closure_sync_timeout(&cl, _t); \
+ } \
+ closure_wake_up(waitlist); \
+ closure_sync(&cl); \
+ _t; \
+})
+
+/*
+ * Returns 0 if timeout expired, remaining time in jiffies (at least 1) if
+ * condition became true
+ */
+#define closure_wait_event_timeout(waitlist, _cond, _timeout) \
+({ \
+ unsigned long _until = jiffies + _timeout; \
+ (_cond) \
+ ? max_t(long, 1L, _until - jiffies) \
+ : __closure_wait_event_timeout(waitlist, _cond, _until);\
+})
+
#endif /* _LINUX_CLOSURE_H */
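A hedged usage sketch for the new timeout variant (not from this diff; the wait list and counter come from a made-up context):

	#include <linux/closure.h>

	/* Give in-flight writes up to one second to drain. */
	static bool example_drain_writes(struct closure_waitlist *wait,
					 atomic_t *nr_writes)
	{
		long remaining = closure_wait_event_timeout(wait,
							    atomic_read(nr_writes) == 0,
							    HZ);

		return remaining != 0;	/* 0 means the one-second timeout expired */
	}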
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 9db877506ea8..d15b64f51336 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -52,4 +52,20 @@ extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long
extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
extern void cma_reserve_pages_on_error(struct cma *cma);
+
+#ifdef CONFIG_CMA
+struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp);
+bool cma_free_folio(struct cma *cma, const struct folio *folio);
+#else
+static inline struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
+{
+ return NULL;
+}
+
+static inline bool cma_free_folio(struct cma *cma, const struct folio *folio)
+{
+ return false;
+}
+#endif
+
#endif
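An illustrative caller of the new folio interface (not part of this diff); @my_cma is a hypothetical CMA area created elsewhere, e.g. via cma_declare_contiguous():

	#include <linux/cma.h>
	#include <linux/gfp.h>

	static void example_cma_folio(struct cma *my_cma)
	{
		/* order-4 folio (16 pages) taken straight from the CMA area */
		struct folio *folio = cma_alloc_folio(my_cma, 4, GFP_KERNEL);

		if (!folio)
			return;

		/* ... use the folio ... */

		/* returns false if the folio did not come from @my_cma */
		cma_free_folio(my_cma, folio);
	}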
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 2df665fa2964..4d4e23b6e3e7 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -133,7 +133,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#define annotate_unreachable() __annotate_unreachable(__COUNTER__)
/* Annotate a C jump table to allow objtool to follow the code flow */
-#define __annotate_jump_table __section(".rodata..c_jump_table")
+#define __annotate_jump_table __section(".rodata..c_jump_table,\"a\",@progbits #")
#else /* !CONFIG_OBJTOOL */
#define annotate_reachable()
@@ -242,6 +242,9 @@ static inline void *offset_to_ptr(const int *off)
/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
+/* Require C Strings (i.e. NUL-terminated) lack the "nonstring" attribute. */
+#define __must_be_cstr(p) BUILD_BUG_ON_ZERO(__annotated(p, nonstring))
+
/*
* This returns a constant expression while determining if an argument is
* a constant expression, most importantly without evaluating the argument.
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index f14c275950b5..1a957ea2f4fe 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -421,6 +421,13 @@ struct ftrace_likely_data {
#define __member_size(p) __builtin_object_size(p, 1)
#endif
+/* Determine if an attribute has been applied to a variable. */
+#if __has_builtin(__builtin_has_attribute)
+#define __annotated(var, attr) __builtin_has_attribute(var, attr)
+#else
+#define __annotated(var, attr) (false)
+#endif
+
/*
* Some versions of gcc do not mark 'asm goto' volatile:
*
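A sketch of what the new __annotated()/__must_be_cstr() pair enables (not from this diff; the takes_cstr() macro and variable names are hypothetical):

	#include <linux/compiler.h>

	/* __nonstring marks a char array that is deliberately not NUL-terminated. */
	static char tag[4] __nonstring = { 'A', 'C', 'P', 'I' };
	static char name[8] = "eth0";

	/* A string-consuming macro can reject nonstring arrays at compile time: */
	#define takes_cstr(p)	((void)__must_be_cstr(p), (p))

	/*
	 * takes_cstr(name) compiles; takes_cstr(tag) trips BUILD_BUG_ON_ZERO() on
	 * compilers that provide __builtin_has_attribute(). Elsewhere __annotated()
	 * evaluates to false and the check is a no-op.
	 */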
diff --git a/include/linux/console.h b/include/linux/console.h
index 31a8f5b85f5d..eba367bf605d 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -16,7 +16,9 @@
#include <linux/atomic.h>
#include <linux/bits.h>
+#include <linux/irq_work.h>
#include <linux/rculist.h>
+#include <linux/rcuwait.h>
#include <linux/types.h>
#include <linux/vesa.h>
@@ -303,7 +305,7 @@ struct nbcon_write_context {
/**
* struct console - The console descriptor structure
* @name: The name of the console driver
- * @write: Write callback to output messages (Optional)
+ * @write: Legacy write callback to output messages (Optional)
* @read: Read callback for console input (Optional)
* @device: The underlying TTY device driver (Optional)
* @unblank: Callback to unblank the console (Optional)
@@ -320,10 +322,14 @@ struct nbcon_write_context {
* @data: Driver private data
* @node: hlist node for the console list
*
- * @write_atomic: Write callback for atomic context
* @nbcon_state: State for nbcon consoles
* @nbcon_seq: Sequence number of the next record for nbcon to print
+ * @nbcon_device_ctxt: Context available for non-printing operations
+ * @nbcon_prev_seq: Seq num the previous nbcon owner was assigned to print
* @pbufs: Pointer to nbcon private buffer
+ * @kthread: Printer kthread for this console
+ * @rcuwait: RCU-safe wait object for @kthread waking
+ * @irq_work: Defer @kthread waking to IRQ work context
*/
struct console {
char name[16];
@@ -345,11 +351,121 @@ struct console {
struct hlist_node node;
/* nbcon console specific members */
- bool (*write_atomic)(struct console *con,
- struct nbcon_write_context *wctxt);
+
+ /**
+ * @write_atomic:
+ *
+ * NBCON callback to write out text in any context. (Optional)
+ *
+ * This callback is called with the console already acquired. However,
+ * a higher priority context is allowed to take it over by default.
+ *
+ * The callback must call nbcon_enter_unsafe() and nbcon_exit_unsafe()
+ * around any code where the takeover is not safe, for example, when
+ * manipulating the serial port registers.
+ *
+ * nbcon_enter_unsafe() will fail if the context has lost the console
+ * ownership in the meantime. In this case, the callback is no longer
+ * allowed to go forward. It must back out immediately and carefully.
+ * The buffer content is also no longer trusted since it no longer
+ * belongs to the context.
+ *
+ * The callback should allow the takeover whenever it is safe. This
+ * increases the chance of seeing messages when the system is in trouble.
+ * If the driver must reacquire ownership in order to finalize or
+ * revert hardware changes, nbcon_reacquire_nobuf() can be used.
+ * However, on reacquire the buffer content is no longer available. A
+ * reacquire cannot be used to resume printing.
+ *
+ * The callback can be called from any context (including NMI).
+ * Therefore it must avoid usage of any locking and instead rely
+ * on the console ownership for synchronization.
+ */
+ void (*write_atomic)(struct console *con, struct nbcon_write_context *wctxt);
+
+ /**
+ * @write_thread:
+ *
+ * NBCON callback to write out text in task context.
+ *
+ * This callback must be called only in task context with both
+ * device_lock() and the nbcon console acquired with
+ * NBCON_PRIO_NORMAL.
+ *
+ * The same rules for console ownership verification and unsafe
+ * section handling apply as with write_atomic().
+ *
+ * The console ownership handling is necessary for synchronization
+ * against write_atomic() which is synchronized only via the context.
+ *
+ * The device_lock() provides the primary serialization for operations
+ * on the device. It might be as relaxed (mutex)[*] or as tight
+ * (disabled preemption and interrupts) as needed. It allows
+ * the kthread to operate in the least restrictive mode[**].
+ *
+ * [*] Standalone nbcon_context_try_acquire() is not safe with
+ * preemption enabled, see nbcon_owner_matches(). But it
+ * can be safe when always called in preemptible context
+ * under the device_lock().
+ *
+ * [**] The device_lock() makes sure that nbcon_context_try_acquire()
+ * would never need to spin which is important especially with
+ * PREEMPT_RT.
+ */
+ void (*write_thread)(struct console *con, struct nbcon_write_context *wctxt);
+
+ /**
+ * @device_lock:
+ *
+ * NBCON callback to begin synchronization with driver code.
+ *
+ * Console drivers typically must deal with access to the hardware
+ * via user input/output (such as an interactive login shell) and
+ * output of kernel messages via printk() calls. This callback is
+ * called by the printk-subsystem whenever it needs to synchronize
+ * with hardware access by the driver. It should be implemented to
+ * use whatever synchronization mechanism the driver is using for
+ * itself (for example, the port lock for uart serial consoles).
+ *
+ * The callback is always called from task context. It may use any
+ * synchronization method required by the driver.
+ *
+ * IMPORTANT: The callback MUST disable migration. The console driver
+ * may be using a synchronization mechanism that already takes
+ * care of this (such as spinlocks). Otherwise this function must
+ * explicitly call migrate_disable().
+ *
+ * The flags argument is provided as a convenience to the driver. It
+ * will be passed again to device_unlock(). It can be ignored if the
+ * driver does not need it.
+ */
+ void (*device_lock)(struct console *con, unsigned long *flags);
+
+ /**
+ * @device_unlock:
+ *
+ * NBCON callback to finish synchronization with driver code.
+ *
+ * It is the counterpart to device_lock().
+ *
+ * This callback is always called from task context. It must
+ * appropriately re-enable migration (depending on how device_lock()
+ * disabled migration).
+ *
+ * The flags argument is the value of the same variable that was
+ * passed to device_lock().
+ */
+ void (*device_unlock)(struct console *con, unsigned long flags);
+
atomic_t __private nbcon_state;
atomic_long_t __private nbcon_seq;
+ struct nbcon_context __private nbcon_device_ctxt;
+ atomic_long_t __private nbcon_prev_seq;
+
struct printk_buffers *pbufs;
+ struct task_struct *kthread;
+ struct rcuwait rcuwait;
+ struct irq_work irq_work;
};
#ifdef CONFIG_LOCKDEP
@@ -378,28 +494,34 @@ extern void console_list_unlock(void) __releases(console_mutex);
extern struct hlist_head console_list;
/**
- * console_srcu_read_flags - Locklessly read the console flags
+ * console_srcu_read_flags - Locklessly read flags of a possibly registered
+ * console
* @con: struct console pointer of console to read flags from
*
- * This function provides the necessary READ_ONCE() and data_race()
- * notation for locklessly reading the console flags. The READ_ONCE()
- * in this function matches the WRITE_ONCE() when @flags are modified
- * for registered consoles with console_srcu_write_flags().
+ * Locklessly reading @con->flags provides a consistent read value because
+ * there is at most one CPU modifying @con->flags and that CPU is using only
+ * read-modify-write operations to do so.
+ *
+ * Requires console_srcu_read_lock to be held, which implies that @con might
+ * be a registered console. The purpose of holding console_srcu_read_lock is
+ * to guarantee that the console state is valid (CON_SUSPENDED/CON_ENABLED)
+ * and that no exit/cleanup routines will run if the console is currently
+ * undergoing unregistration.
*
- * Only use this function to read console flags when locklessly
- * iterating the console list via srcu.
+ * If the caller is holding the console_list_lock or it is _certain_ that
+ * @con is not and will not become registered, the caller may read
+ * @con->flags directly instead.
*
* Context: Any context.
+ * Return: The current value of the @con->flags field.
*/
static inline short console_srcu_read_flags(const struct console *con)
{
WARN_ON_ONCE(!console_srcu_read_lock_is_held());
/*
- * Locklessly reading console->flags provides a consistent
- * read value because there is at most one CPU modifying
- * console->flags and that CPU is using only read-modify-write
- * operations to do so.
+ * The READ_ONCE() matches the WRITE_ONCE() when @flags are modified
+ * for registered consoles with console_srcu_write_flags().
*/
return data_race(READ_ONCE(con->flags));
}
@@ -477,13 +599,19 @@ static inline bool console_is_registered(const struct console *con)
hlist_for_each_entry(con, &console_list, node)
#ifdef CONFIG_PRINTK
+extern void nbcon_cpu_emergency_enter(void);
+extern void nbcon_cpu_emergency_exit(void);
extern bool nbcon_can_proceed(struct nbcon_write_context *wctxt);
extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt);
extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt);
+extern void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt);
#else
+static inline void nbcon_cpu_emergency_enter(void) { }
+static inline void nbcon_cpu_emergency_exit(void) { }
static inline bool nbcon_can_proceed(struct nbcon_write_context *wctxt) { return false; }
static inline bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt) { return false; }
static inline bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) { return false; }
+static inline void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt) { }
#endif
extern int console_set_on_cmdline;
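
For illustration, a serial-style driver could back the new device_lock()/device_unlock() callbacks with its existing port lock; taking a spinlock with IRQs disabled also disables migration, which satisfies the documented requirement. The foo_port structure and field names are hypothetical, not part of this patch:

    static void foo_console_device_lock(struct console *con, unsigned long *flags)
    {
            struct foo_port *port = con->data;

            spin_lock_irqsave(&port->lock, *flags);
    }

    static void foo_console_device_unlock(struct console *con, unsigned long flags)
    {
            struct foo_port *port = con->data;

            spin_unlock_irqrestore(&port->lock, flags);
    }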
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 8a78fabeafc3..af9fe87a0922 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -26,26 +26,26 @@ extern void user_exit_callable(void);
static inline void user_enter(void)
{
if (context_tracking_enabled())
- ct_user_enter(CONTEXT_USER);
+ ct_user_enter(CT_STATE_USER);
}
static inline void user_exit(void)
{
if (context_tracking_enabled())
- ct_user_exit(CONTEXT_USER);
+ ct_user_exit(CT_STATE_USER);
}
/* Called with interrupts disabled. */
static __always_inline void user_enter_irqoff(void)
{
if (context_tracking_enabled())
- __ct_user_enter(CONTEXT_USER);
+ __ct_user_enter(CT_STATE_USER);
}
static __always_inline void user_exit_irqoff(void)
{
if (context_tracking_enabled())
- __ct_user_exit(CONTEXT_USER);
+ __ct_user_exit(CT_STATE_USER);
}
static inline enum ctx_state exception_enter(void)
@@ -57,7 +57,7 @@ static inline enum ctx_state exception_enter(void)
return 0;
prev_ctx = __ct_state();
- if (prev_ctx != CONTEXT_KERNEL)
+ if (prev_ctx != CT_STATE_KERNEL)
ct_user_exit(prev_ctx);
return prev_ctx;
@@ -67,7 +67,7 @@ static inline void exception_exit(enum ctx_state prev_ctx)
{
if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) &&
context_tracking_enabled()) {
- if (prev_ctx != CONTEXT_KERNEL)
+ if (prev_ctx != CT_STATE_KERNEL)
ct_user_enter(prev_ctx);
}
}
@@ -75,7 +75,7 @@ static inline void exception_exit(enum ctx_state prev_ctx)
static __always_inline bool context_tracking_guest_enter(void)
{
if (context_tracking_enabled())
- __ct_user_enter(CONTEXT_GUEST);
+ __ct_user_enter(CT_STATE_GUEST);
return context_tracking_enabled_this_cpu();
}
@@ -83,7 +83,7 @@ static __always_inline bool context_tracking_guest_enter(void)
static __always_inline bool context_tracking_guest_exit(void)
{
if (context_tracking_enabled())
- __ct_user_exit(CONTEXT_GUEST);
+ __ct_user_exit(CT_STATE_GUEST);
return context_tracking_enabled_this_cpu();
}
@@ -115,13 +115,17 @@ extern void ct_idle_enter(void);
extern void ct_idle_exit(void);
/*
- * Is the current CPU in an extended quiescent state?
+ * Is RCU watching the current CPU (IOW, it is not in an extended quiescent state)?
+ *
+ * Note that this returns the actual boolean data (watching / not watching),
+ * whereas ct_rcu_watching() returns the RCU_WATCHING subvariable of
+ * context_tracking.state.
*
* No ordering, as we are sampling CPU-local information.
*/
-static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
+static __always_inline bool rcu_is_watching_curr_cpu(void)
{
- return !(raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX);
+ return raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_RCU_WATCHING;
}
/*
@@ -142,9 +146,9 @@ static __always_inline bool warn_rcu_enter(void)
* lots of the actual reporting also relies on RCU.
*/
preempt_disable_notrace();
- if (rcu_dynticks_curr_cpu_in_eqs()) {
+ if (!rcu_is_watching_curr_cpu()) {
ret = true;
- ct_state_inc(RCU_DYNTICKS_IDX);
+ ct_state_inc(CT_RCU_WATCHING);
}
return ret;
@@ -153,7 +157,7 @@ static __always_inline bool warn_rcu_enter(void)
static __always_inline void warn_rcu_exit(bool rcu)
{
if (rcu)
- ct_state_inc(RCU_DYNTICKS_IDX);
+ ct_state_inc(CT_RCU_WATCHING);
preempt_enable_notrace();
}
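
The renamed helpers keep the distinction noted in the comment above: rcu_is_watching_curr_cpu() answers a yes/no question, while ct_rcu_watching() exposes the raw counter bits. A two-line sketch, purely illustrative:

    /* Yes/no: is RCU watching this CPU right now? */
    bool watching = rcu_is_watching_curr_cpu();

    /* Raw CT_RCU_WATCHING counter bits of context_tracking.state. */
    int snapshot = ct_rcu_watching();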
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index bbff5f7f8803..7b8433d5a8ef 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -7,22 +7,22 @@
#include <linux/context_tracking_irq.h>
/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
-#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1)
+#define CT_NESTING_IRQ_NONIDLE ((LONG_MAX / 2) + 1)
enum ctx_state {
- CONTEXT_DISABLED = -1, /* returned by ct_state() if unknown */
- CONTEXT_KERNEL = 0,
- CONTEXT_IDLE = 1,
- CONTEXT_USER = 2,
- CONTEXT_GUEST = 3,
- CONTEXT_MAX = 4,
+ CT_STATE_DISABLED = -1, /* returned by ct_state() if unknown */
+ CT_STATE_KERNEL = 0,
+ CT_STATE_IDLE = 1,
+ CT_STATE_USER = 2,
+ CT_STATE_GUEST = 3,
+ CT_STATE_MAX = 4,
};
-/* Even value for idle, else odd. */
-#define RCU_DYNTICKS_IDX CONTEXT_MAX
+/* Odd value for watching, else even. */
+#define CT_RCU_WATCHING CT_STATE_MAX
-#define CT_STATE_MASK (CONTEXT_MAX - 1)
-#define CT_DYNTICKS_MASK (~CT_STATE_MASK)
+#define CT_STATE_MASK (CT_STATE_MAX - 1)
+#define CT_RCU_WATCHING_MASK (~CT_STATE_MASK)
struct context_tracking {
#ifdef CONFIG_CONTEXT_TRACKING_USER
@@ -39,8 +39,8 @@ struct context_tracking {
atomic_t state;
#endif
#ifdef CONFIG_CONTEXT_TRACKING_IDLE
- long dynticks_nesting; /* Track process nesting level. */
- long dynticks_nmi_nesting; /* Track irq/NMI nesting level. */
+ long nesting; /* Track process nesting level. */
+ long nmi_nesting; /* Track irq/NMI nesting level. */
#endif
};
@@ -56,47 +56,47 @@ static __always_inline int __ct_state(void)
#endif
#ifdef CONFIG_CONTEXT_TRACKING_IDLE
-static __always_inline int ct_dynticks(void)
+static __always_inline int ct_rcu_watching(void)
{
- return atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_DYNTICKS_MASK;
+ return atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_RCU_WATCHING_MASK;
}
-static __always_inline int ct_dynticks_cpu(int cpu)
+static __always_inline int ct_rcu_watching_cpu(int cpu)
{
struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
- return atomic_read(&ct->state) & CT_DYNTICKS_MASK;
+ return atomic_read(&ct->state) & CT_RCU_WATCHING_MASK;
}
-static __always_inline int ct_dynticks_cpu_acquire(int cpu)
+static __always_inline int ct_rcu_watching_cpu_acquire(int cpu)
{
struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
- return atomic_read_acquire(&ct->state) & CT_DYNTICKS_MASK;
+ return atomic_read_acquire(&ct->state) & CT_RCU_WATCHING_MASK;
}
-static __always_inline long ct_dynticks_nesting(void)
+static __always_inline long ct_nesting(void)
{
- return __this_cpu_read(context_tracking.dynticks_nesting);
+ return __this_cpu_read(context_tracking.nesting);
}
-static __always_inline long ct_dynticks_nesting_cpu(int cpu)
+static __always_inline long ct_nesting_cpu(int cpu)
{
struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
- return ct->dynticks_nesting;
+ return ct->nesting;
}
-static __always_inline long ct_dynticks_nmi_nesting(void)
+static __always_inline long ct_nmi_nesting(void)
{
- return __this_cpu_read(context_tracking.dynticks_nmi_nesting);
+ return __this_cpu_read(context_tracking.nmi_nesting);
}
-static __always_inline long ct_dynticks_nmi_nesting_cpu(int cpu)
+static __always_inline long ct_nmi_nesting_cpu(int cpu)
{
struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
- return ct->dynticks_nmi_nesting;
+ return ct->nmi_nesting;
}
#endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */
@@ -113,7 +113,7 @@ static __always_inline bool context_tracking_enabled_cpu(int cpu)
return context_tracking_enabled() && per_cpu(context_tracking.active, cpu);
}
-static inline bool context_tracking_enabled_this_cpu(void)
+static __always_inline bool context_tracking_enabled_this_cpu(void)
{
return context_tracking_enabled() && __this_cpu_read(context_tracking.active);
}
@@ -123,14 +123,14 @@ static inline bool context_tracking_enabled_this_cpu(void)
*
* Returns the current cpu's context tracking state if context tracking
* is enabled. If context tracking is disabled, returns
- * CONTEXT_DISABLED. This should be used primarily for debugging.
+ * CT_STATE_DISABLED. This should be used primarily for debugging.
*/
static __always_inline int ct_state(void)
{
int ret;
if (!context_tracking_enabled())
- return CONTEXT_DISABLED;
+ return CT_STATE_DISABLED;
preempt_disable();
ret = __ct_state();
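
Illustration of the layout the renamed masks describe (not code from the patch): the low bits of context_tracking.state hold the CT_STATE_* value and the remaining bits hold the RCU-watching counter:

    int state = atomic_read(this_cpu_ptr(&context_tracking.state));
    int ctx   = state & CT_STATE_MASK;          /* CT_STATE_KERNEL/IDLE/USER/GUEST */
    int rcu   = state & CT_RCU_WATCHING_MASK;   /* RCU-watching counter bits */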
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index 0904ba010341..45e598fe3476 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -43,8 +43,30 @@ extern int dump_align(struct coredump_params *cprm, int align);
int dump_user_range(struct coredump_params *cprm, unsigned long start,
unsigned long len);
extern void do_coredump(const kernel_siginfo_t *siginfo);
+
+/*
+ * Logging for the coredump code, ratelimited.
+ * The TGID and comm fields are added to the message.
+ */
+
+#define __COREDUMP_PRINTK(Level, Format, ...) \
+ do { \
+ char comm[TASK_COMM_LEN]; \
+ \
+ get_task_comm(comm, current); \
+ printk_ratelimited(Level "coredump: %d(%*pE): " Format "\n", \
+ task_tgid_vnr(current), (int)strlen(comm), comm, ##__VA_ARGS__); \
+ } while (0) \
+
+#define coredump_report(fmt, ...) __COREDUMP_PRINTK(KERN_INFO, fmt, ##__VA_ARGS__)
+#define coredump_report_failure(fmt, ...) __COREDUMP_PRINTK(KERN_WARNING, fmt, ##__VA_ARGS__)
+
#else
static inline void do_coredump(const kernel_siginfo_t *siginfo) {}
+
+#define coredump_report(...)
+#define coredump_report_failure(...)
+
#endif
#if defined(CONFIG_COREDUMP) && defined(CONFIG_SYSCTL)
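
Usage sketch for the new coredump logging helpers (error value and message are made up): callers supply only the message, while the TGID/comm prefix and ratelimiting come from the macro:

    coredump_report_failure("unable to write note segment, error %d", err);
    /* emits (ratelimited): "coredump: 1234(myprog): unable to write note segment, error -5" */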
diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h
index 51ac441a37c3..89b0ac0014b0 100644
--- a/include/linux/coresight-pmu.h
+++ b/include/linux/coresight-pmu.h
@@ -49,12 +49,21 @@
* Interpretation of the PERF_RECORD_AUX_OUTPUT_HW_ID payload.
* Used to associate a CPU with the CoreSight Trace ID.
* [07:00] - Trace ID - uses 8 bits to make value easy to read in file.
- * [59:08] - Unused (SBZ)
- * [63:60] - Version
+ * [39:08] - Sink ID - as reported in /sys/bus/event_source/devices/cs_etm/sinks/
+ * Added in minor version 1.
+ * [55:40] - Unused (SBZ)
+ * [59:56] - Minor Version - previously existing fields are compatible with
+ * all minor versions.
+ * [63:60] - Major Version - previously existing fields mean different things
+ * in new major versions.
*/
#define CS_AUX_HW_ID_TRACE_ID_MASK GENMASK_ULL(7, 0)
-#define CS_AUX_HW_ID_VERSION_MASK GENMASK_ULL(63, 60)
+#define CS_AUX_HW_ID_SINK_ID_MASK GENMASK_ULL(39, 8)
-#define CS_AUX_HW_ID_CURR_VERSION 0
+#define CS_AUX_HW_ID_MINOR_VERSION_MASK GENMASK_ULL(59, 56)
+#define CS_AUX_HW_ID_MAJOR_VERSION_MASK GENMASK_ULL(63, 60)
+
+#define CS_AUX_HW_ID_MAJOR_VERSION 0
+#define CS_AUX_HW_ID_MINOR_VERSION 1
#endif
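
A sketch of how a version-1 PERF_RECORD_AUX_OUTPUT_HW_ID payload could be assembled from these fields with the generic bitfield helpers (sink_id and trace_id are placeholder variables; this is not the driver code itself):

    #include <linux/bitfield.h>

    u64 hw_id;

    hw_id = FIELD_PREP(CS_AUX_HW_ID_MAJOR_VERSION_MASK, CS_AUX_HW_ID_MAJOR_VERSION) |
            FIELD_PREP(CS_AUX_HW_ID_MINOR_VERSION_MASK, CS_AUX_HW_ID_MINOR_VERSION) |
            FIELD_PREP(CS_AUX_HW_ID_SINK_ID_MASK, sink_id) |
            FIELD_PREP(CS_AUX_HW_ID_TRACE_ID_MASK, trace_id);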
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index f09ace92176e..c13342594278 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -218,6 +218,24 @@ struct coresight_sysfs_link {
const char *target_name;
};
+/* architecturally we have 128 IDs, some of which are reserved */
+#define CORESIGHT_TRACE_IDS_MAX 128
+
+/**
+ * Trace ID map.
+ *
+ * @used_ids: Bitmap to register available (bit = 0) and in use (bit = 1) IDs.
+ * Initialised so that the reserved IDs are permanently marked as
+ * in use.
+ * @perf_cs_etm_session_active: Number of Perf sessions using this ID map.
+ */
+struct coresight_trace_id_map {
+ DECLARE_BITMAP(used_ids, CORESIGHT_TRACE_IDS_MAX);
+ atomic_t __percpu *cpu_map;
+ atomic_t perf_cs_etm_session_active;
+ spinlock_t lock;
+};
+
/**
* struct coresight_device - representation of a device as used by the framework
* @pdata: Platform data with device connections associated to this device.
@@ -271,6 +289,7 @@ struct coresight_device {
bool sysfs_sink_activated;
struct dev_ext_attribute *ea;
struct coresight_device *def_sink;
+ struct coresight_trace_id_map perf_sink_id_map;
/* sysfs links between components */
int nr_links;
bool has_conns_grp;
@@ -365,7 +384,7 @@ struct coresight_ops_link {
struct coresight_ops_source {
int (*cpu_id)(struct coresight_device *csdev);
int (*enable)(struct coresight_device *csdev, struct perf_event *event,
- enum cs_mode mode);
+ enum cs_mode mode, struct coresight_trace_id_map *id_map);
void (*disable)(struct coresight_device *csdev,
struct perf_event *event);
};
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index d4d2f4d1d7cb..7fe0981a7e46 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -577,12 +577,6 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
#define CPUFREQ_POLICY_POWERSAVE (1)
#define CPUFREQ_POLICY_PERFORMANCE (2)
-/*
- * The polling frequency depends on the capability of the processor. Default
- * polling frequency is 1000 times the transition latency of the processor.
- */
-#define LATENCY_MULTIPLIER (1000)
-
struct cpufreq_governor {
char name[CPUFREQ_NAME_LEN];
int (*init)(struct cpufreq_policy *policy);
@@ -1113,10 +1107,9 @@ static inline int parse_perf_domain(int cpu, const char *list_name,
const char *cell_name,
struct of_phandle_args *args)
{
- struct device_node *cpu_np;
int ret;
- cpu_np = of_cpu_device_node_get(cpu);
+ struct device_node *cpu_np __free(device_node) = of_cpu_device_node_get(cpu);
if (!cpu_np)
return -ENODEV;
@@ -1124,9 +1117,6 @@ static inline int parse_perf_domain(int cpu, const char *list_name,
args);
if (ret < 0)
return ret;
-
- of_node_put(cpu_np);
-
return 0;
}
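
The cpufreq hunk above switches to the scoped-cleanup idiom: __free(device_node) arranges an automatic of_node_put() when the pointer goes out of scope, so every return path is covered without explicit cleanup. The general shape, as a sketch:

    struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);

    if (!np)
            return -ENODEV;
    /* of_node_put(np) runs automatically on every return from here on */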
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 9316c39260e0..2361ed4d2b15 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -144,7 +144,8 @@ enum cpuhp_state {
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_IRQ_MIPS_GIC_STARTING,
- CPUHP_AP_IRQ_LOONGARCH_STARTING,
+ CPUHP_AP_IRQ_EIOINTC_STARTING,
+ CPUHP_AP_IRQ_AVECINTC_STARTING,
CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
CPUHP_AP_IRQ_RISCV_IMSIC_STARTING,
CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING,
@@ -152,7 +153,6 @@ enum cpuhp_state {
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
CPUHP_AP_PERF_X86_STARTING,
CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
- CPUHP_AP_PERF_X86_CSTATE_STARTING,
CPUHP_AP_PERF_XTENSA_STARTING,
CPUHP_AP_ARM_VFP_STARTING,
CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
@@ -209,7 +209,6 @@ enum cpuhp_state {
CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
CPUHP_AP_PERF_X86_RAPL_ONLINE,
- CPUHP_AP_PERF_X86_CSTATE_ONLINE,
CPUHP_AP_PERF_S390_CF_ONLINE,
CPUHP_AP_PERF_S390_SF_ONLINE,
CPUHP_AP_PERF_ARM_CCI_ONLINE,
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 53158de44b83..9278a50d514f 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -30,7 +30,7 @@
extern unsigned int nr_cpu_ids;
#endif
-static inline void set_nr_cpu_ids(unsigned int nr)
+static __always_inline void set_nr_cpu_ids(unsigned int nr)
{
#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
WARN_ON(nr != nr_cpu_ids);
@@ -149,7 +149,7 @@ static __always_inline unsigned int cpumask_check(unsigned int cpu)
*
* Return: >= nr_cpu_ids if no cpus set.
*/
-static inline unsigned int cpumask_first(const struct cpumask *srcp)
+static __always_inline unsigned int cpumask_first(const struct cpumask *srcp)
{
return find_first_bit(cpumask_bits(srcp), small_cpumask_bits);
}
@@ -160,7 +160,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
*
* Return: >= nr_cpu_ids if all cpus are set.
*/
-static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
+static __always_inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
{
return find_first_zero_bit(cpumask_bits(srcp), small_cpumask_bits);
}
@@ -172,7 +172,7 @@ static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
*
* Return: >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
*/
-static inline
+static __always_inline
unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
{
return find_first_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
@@ -186,7 +186,7 @@ unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask
*
* Return: >= nr_cpu_ids if no cpus set in all.
*/
-static inline
+static __always_inline
unsigned int cpumask_first_and_and(const struct cpumask *srcp1,
const struct cpumask *srcp2,
const struct cpumask *srcp3)
@@ -201,7 +201,7 @@ unsigned int cpumask_first_and_and(const struct cpumask *srcp1,
*
* Return: >= nr_cpumask_bits if no CPUs set.
*/
-static inline unsigned int cpumask_last(const struct cpumask *srcp)
+static __always_inline unsigned int cpumask_last(const struct cpumask *srcp)
{
return find_last_bit(cpumask_bits(srcp), small_cpumask_bits);
}
@@ -213,7 +213,7 @@ static inline unsigned int cpumask_last(const struct cpumask *srcp)
*
* Return: >= nr_cpu_ids if no further cpus set.
*/
-static inline
+static __always_inline
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
/* -1 is a legal arg here. */
@@ -229,7 +229,8 @@ unsigned int cpumask_next(int n, const struct cpumask *srcp)
*
* Return: >= nr_cpu_ids if no further cpus unset.
*/
-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+static __always_inline
+unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
/* -1 is a legal arg here. */
if (n != -1)
@@ -239,18 +240,21 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
#if NR_CPUS == 1
/* Uniprocessor: there is only one valid CPU */
-static inline unsigned int cpumask_local_spread(unsigned int i, int node)
+static __always_inline
+unsigned int cpumask_local_spread(unsigned int i, int node)
{
return 0;
}
-static inline unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
return cpumask_first_and(src1p, src2p);
}
-static inline unsigned int cpumask_any_distribute(const struct cpumask *srcp)
+static __always_inline
+unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
return cpumask_first(srcp);
}
@@ -269,9 +273,9 @@ unsigned int cpumask_any_distribute(const struct cpumask *srcp);
*
* Return: >= nr_cpu_ids if no further cpus set in both.
*/
-static inline
+static __always_inline
unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
- const struct cpumask *src2p)
+ const struct cpumask *src2p)
{
/* -1 is a legal arg here. */
if (n != -1)
@@ -291,7 +295,7 @@ unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
for_each_set_bit(cpu, cpumask_bits(mask), small_cpumask_bits)
#if NR_CPUS == 1
-static inline
+static __always_inline
unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
cpumask_check(start);
@@ -394,7 +398,7 @@ unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int sta
* Often used to find any cpu but smp_processor_id() in a mask.
* Return: >= nr_cpu_ids if no cpus set.
*/
-static inline
+static __always_inline
unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
unsigned int i;
@@ -414,7 +418,7 @@ unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
*
* Returns >= nr_cpu_ids if no cpus set.
*/
-static inline
+static __always_inline
unsigned int cpumask_any_and_but(const struct cpumask *mask1,
const struct cpumask *mask2,
unsigned int cpu)
@@ -436,7 +440,8 @@ unsigned int cpumask_any_and_but(const struct cpumask *mask1,
*
* Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
-static inline unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp)
+static __always_inline
+unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp)
{
return find_nth_bit(cpumask_bits(srcp), small_cpumask_bits, cpumask_check(cpu));
}
@@ -449,7 +454,7 @@ static inline unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *s
*
* Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
-static inline
+static __always_inline
unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
const struct cpumask *srcp2)
{
@@ -465,7 +470,7 @@ unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
*
* Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
-static inline
+static __always_inline
unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1,
const struct cpumask *srcp2)
{
@@ -508,12 +513,14 @@ unsigned int cpumask_nth_and_andnot(unsigned int cpu, const struct cpumask *srcp
* @cpu: cpu number (< nr_cpu_ids)
* @dstp: the cpumask pointer
*/
-static __always_inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+static __always_inline
+void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
-static __always_inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+static __always_inline
+void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
__set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
@@ -557,7 +564,8 @@ static __always_inline void __cpumask_assign_cpu(int cpu, struct cpumask *dstp,
*
* Return: true if @cpu is set in @cpumask, else returns false
*/
-static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
+static __always_inline
+bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
}
@@ -571,7 +579,8 @@ static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpum
*
* Return: true if @cpu is set in old bitmap of @cpumask, else returns false
*/
-static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
+static __always_inline
+bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
@@ -585,7 +594,8 @@ static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cp
*
* Return: true if @cpu is set in old bitmap of @cpumask, else returns false
*/
-static __always_inline bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
+static __always_inline
+bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
@@ -594,7 +604,7 @@ static __always_inline bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *
* cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
* @dstp: the cpumask pointer
*/
-static inline void cpumask_setall(struct cpumask *dstp)
+static __always_inline void cpumask_setall(struct cpumask *dstp)
{
if (small_const_nbits(small_cpumask_bits)) {
cpumask_bits(dstp)[0] = BITMAP_LAST_WORD_MASK(nr_cpumask_bits);
@@ -607,7 +617,7 @@ static inline void cpumask_setall(struct cpumask *dstp)
* cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
* @dstp: the cpumask pointer
*/
-static inline void cpumask_clear(struct cpumask *dstp)
+static __always_inline void cpumask_clear(struct cpumask *dstp)
{
bitmap_zero(cpumask_bits(dstp), large_cpumask_bits);
}
@@ -620,9 +630,9 @@ static inline void cpumask_clear(struct cpumask *dstp)
*
* Return: false if *@dstp is empty, else returns true
*/
-static inline bool cpumask_and(struct cpumask *dstp,
- const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_and(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), small_cpumask_bits);
@@ -634,8 +644,9 @@ static inline bool cpumask_and(struct cpumask *dstp,
* @src1p: the first input
* @src2p: the second input
*/
-static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), small_cpumask_bits);
@@ -647,9 +658,9 @@ static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
* @src1p: the first input
* @src2p: the second input
*/
-static inline void cpumask_xor(struct cpumask *dstp,
- const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+void cpumask_xor(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), small_cpumask_bits);
@@ -663,9 +674,9 @@ static inline void cpumask_xor(struct cpumask *dstp,
*
* Return: false if *@dstp is empty, else returns true
*/
-static inline bool cpumask_andnot(struct cpumask *dstp,
- const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_andnot(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), small_cpumask_bits);
@@ -678,8 +689,8 @@ static inline bool cpumask_andnot(struct cpumask *dstp,
*
* Return: true if the cpumasks are equal, false if not
*/
-static inline bool cpumask_equal(const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_equal(const struct cpumask *src1p, const struct cpumask *src2p)
{
return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
small_cpumask_bits);
@@ -694,9 +705,9 @@ static inline bool cpumask_equal(const struct cpumask *src1p,
* Return: true if first cpumask ORed with second cpumask == third cpumask,
* otherwise false
*/
-static inline bool cpumask_or_equal(const struct cpumask *src1p,
- const struct cpumask *src2p,
- const struct cpumask *src3p)
+static __always_inline
+bool cpumask_or_equal(const struct cpumask *src1p, const struct cpumask *src2p,
+ const struct cpumask *src3p)
{
return bitmap_or_equal(cpumask_bits(src1p), cpumask_bits(src2p),
cpumask_bits(src3p), small_cpumask_bits);
@@ -710,8 +721,8 @@ static inline bool cpumask_or_equal(const struct cpumask *src1p,
* Return: true if first cpumask ANDed with second cpumask is non-empty,
* otherwise false
*/
-static inline bool cpumask_intersects(const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_intersects(const struct cpumask *src1p, const struct cpumask *src2p)
{
return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
small_cpumask_bits);
@@ -724,8 +735,8 @@ static inline bool cpumask_intersects(const struct cpumask *src1p,
*
* Return: true if *@src1p is a subset of *@src2p, else returns false
*/
-static inline bool cpumask_subset(const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_subset(const struct cpumask *src1p, const struct cpumask *src2p)
{
return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
small_cpumask_bits);
@@ -737,7 +748,7 @@ static inline bool cpumask_subset(const struct cpumask *src1p,
*
* Return: true if srcp is empty (has no bits set), else false
*/
-static inline bool cpumask_empty(const struct cpumask *srcp)
+static __always_inline bool cpumask_empty(const struct cpumask *srcp)
{
return bitmap_empty(cpumask_bits(srcp), small_cpumask_bits);
}
@@ -748,7 +759,7 @@ static inline bool cpumask_empty(const struct cpumask *srcp)
*
* Return: true if srcp is full (has all bits set), else false
*/
-static inline bool cpumask_full(const struct cpumask *srcp)
+static __always_inline bool cpumask_full(const struct cpumask *srcp)
{
return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
}
@@ -759,7 +770,7 @@ static inline bool cpumask_full(const struct cpumask *srcp)
*
* Return: count of bits set in *srcp
*/
-static inline unsigned int cpumask_weight(const struct cpumask *srcp)
+static __always_inline unsigned int cpumask_weight(const struct cpumask *srcp)
{
return bitmap_weight(cpumask_bits(srcp), small_cpumask_bits);
}
@@ -771,8 +782,8 @@ static inline unsigned int cpumask_weight(const struct cpumask *srcp)
*
* Return: count of bits set in both *srcp1 and *srcp2
*/
-static inline unsigned int cpumask_weight_and(const struct cpumask *srcp1,
- const struct cpumask *srcp2)
+static __always_inline
+unsigned int cpumask_weight_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
{
return bitmap_weight_and(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}
@@ -784,8 +795,9 @@ static inline unsigned int cpumask_weight_and(const struct cpumask *srcp1,
*
* Return: count of bits set in both *srcp1 and *srcp2
*/
-static inline unsigned int cpumask_weight_andnot(const struct cpumask *srcp1,
- const struct cpumask *srcp2)
+static __always_inline
+unsigned int cpumask_weight_andnot(const struct cpumask *srcp1,
+ const struct cpumask *srcp2)
{
return bitmap_weight_andnot(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}
@@ -796,8 +808,8 @@ static inline unsigned int cpumask_weight_andnot(const struct cpumask *srcp1,
* @srcp: the input to shift
* @n: the number of bits to shift by
*/
-static inline void cpumask_shift_right(struct cpumask *dstp,
- const struct cpumask *srcp, int n)
+static __always_inline
+void cpumask_shift_right(struct cpumask *dstp, const struct cpumask *srcp, int n)
{
bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
small_cpumask_bits);
@@ -809,8 +821,8 @@ static inline void cpumask_shift_right(struct cpumask *dstp,
* @srcp: the input to shift
* @n: the number of bits to shift by
*/
-static inline void cpumask_shift_left(struct cpumask *dstp,
- const struct cpumask *srcp, int n)
+static __always_inline
+void cpumask_shift_left(struct cpumask *dstp, const struct cpumask *srcp, int n)
{
bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
nr_cpumask_bits);
@@ -821,8 +833,8 @@ static inline void cpumask_shift_left(struct cpumask *dstp,
* @dstp: the result
* @srcp: the input cpumask
*/
-static inline void cpumask_copy(struct cpumask *dstp,
- const struct cpumask *srcp)
+static __always_inline
+void cpumask_copy(struct cpumask *dstp, const struct cpumask *srcp)
{
bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), large_cpumask_bits);
}
@@ -858,8 +870,8 @@ static inline void cpumask_copy(struct cpumask *dstp,
*
* Return: -errno, or 0 for success.
*/
-static inline int cpumask_parse_user(const char __user *buf, int len,
- struct cpumask *dstp)
+static __always_inline
+int cpumask_parse_user(const char __user *buf, int len, struct cpumask *dstp)
{
return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
}
@@ -872,8 +884,8 @@ static inline int cpumask_parse_user(const char __user *buf, int len,
*
* Return: -errno, or 0 for success.
*/
-static inline int cpumask_parselist_user(const char __user *buf, int len,
- struct cpumask *dstp)
+static __always_inline
+int cpumask_parselist_user(const char __user *buf, int len, struct cpumask *dstp)
{
return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
nr_cpumask_bits);
@@ -886,7 +898,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
*
* Return: -errno, or 0 for success.
*/
-static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
+static __always_inline int cpumask_parse(const char *buf, struct cpumask *dstp)
{
return bitmap_parse(buf, UINT_MAX, cpumask_bits(dstp), nr_cpumask_bits);
}
@@ -898,7 +910,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
*
* Return: -errno, or 0 for success.
*/
-static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
+static __always_inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
}
@@ -908,7 +920,7 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
*
* Return: size to allocate for a &struct cpumask in bytes
*/
-static inline unsigned int cpumask_size(void)
+static __always_inline unsigned int cpumask_size(void)
{
return bitmap_size(large_cpumask_bits);
}
@@ -920,7 +932,7 @@ static inline unsigned int cpumask_size(void)
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
-static inline
+static __always_inline
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
@@ -938,13 +950,13 @@ bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
*
* Return: %true if allocation succeeded, %false if not
*/
-static inline
+static __always_inline
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}
-static inline
+static __always_inline
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return alloc_cpumask_var(mask, flags | __GFP_ZERO);
@@ -954,7 +966,7 @@ void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask);
-static inline bool cpumask_available(cpumask_var_t mask)
+static __always_inline bool cpumask_available(cpumask_var_t mask)
{
return mask != NULL;
}
@@ -964,43 +976,43 @@ static inline bool cpumask_available(cpumask_var_t mask)
#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
#define __cpumask_var_read_mostly
-static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+static __always_inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return true;
}
-static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
+static __always_inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
int node)
{
return true;
}
-static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+static __always_inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
cpumask_clear(*mask);
return true;
}
-static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
+static __always_inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
int node)
{
cpumask_clear(*mask);
return true;
}
-static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
+static __always_inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
}
-static inline void free_cpumask_var(cpumask_var_t mask)
+static __always_inline void free_cpumask_var(cpumask_var_t mask)
{
}
-static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
+static __always_inline void free_bootmem_cpumask_var(cpumask_var_t mask)
{
}
-static inline bool cpumask_available(cpumask_var_t mask)
+static __always_inline bool cpumask_available(cpumask_var_t mask)
{
return true;
}
@@ -1058,7 +1070,7 @@ void set_cpu_online(unsigned int cpu, bool online);
((struct cpumask *)(1 ? (bitmap) \
: (void *)sizeof(__check_is_bitmap(bitmap))))
-static inline int __check_is_bitmap(const unsigned long *bitmap)
+static __always_inline int __check_is_bitmap(const unsigned long *bitmap)
{
return 1;
}
@@ -1073,7 +1085,7 @@ static inline int __check_is_bitmap(const unsigned long *bitmap)
extern const unsigned long
cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
-static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
+static __always_inline const struct cpumask *get_cpu_mask(unsigned int cpu)
{
const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
p -= cpu / BITS_PER_LONG;
@@ -1100,32 +1112,32 @@ static __always_inline unsigned int num_online_cpus(void)
#define num_present_cpus() cpumask_weight(cpu_present_mask)
#define num_active_cpus() cpumask_weight(cpu_active_mask)
-static inline bool cpu_online(unsigned int cpu)
+static __always_inline bool cpu_online(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_online_mask);
}
-static inline bool cpu_enabled(unsigned int cpu)
+static __always_inline bool cpu_enabled(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_enabled_mask);
}
-static inline bool cpu_possible(unsigned int cpu)
+static __always_inline bool cpu_possible(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_possible_mask);
}
-static inline bool cpu_present(unsigned int cpu)
+static __always_inline bool cpu_present(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_present_mask);
}
-static inline bool cpu_active(unsigned int cpu)
+static __always_inline bool cpu_active(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_active_mask);
}
-static inline bool cpu_dying(unsigned int cpu)
+static __always_inline bool cpu_dying(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_dying_mask);
}
@@ -1138,32 +1150,32 @@ static inline bool cpu_dying(unsigned int cpu)
#define num_present_cpus() 1U
#define num_active_cpus() 1U
-static inline bool cpu_online(unsigned int cpu)
+static __always_inline bool cpu_online(unsigned int cpu)
{
return cpu == 0;
}
-static inline bool cpu_possible(unsigned int cpu)
+static __always_inline bool cpu_possible(unsigned int cpu)
{
return cpu == 0;
}
-static inline bool cpu_enabled(unsigned int cpu)
+static __always_inline bool cpu_enabled(unsigned int cpu)
{
return cpu == 0;
}
-static inline bool cpu_present(unsigned int cpu)
+static __always_inline bool cpu_present(unsigned int cpu)
{
return cpu == 0;
}
-static inline bool cpu_active(unsigned int cpu)
+static __always_inline bool cpu_active(unsigned int cpu)
{
return cpu == 0;
}
-static inline bool cpu_dying(unsigned int cpu)
+static __always_inline bool cpu_dying(unsigned int cpu)
{
return false;
}
@@ -1197,7 +1209,7 @@ static inline bool cpu_dying(unsigned int cpu)
* Return: the length of the (null-terminated) @buf string, zero if
* nothing is copied.
*/
-static inline ssize_t
+static __always_inline ssize_t
cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
{
return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
@@ -1220,9 +1232,9 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
* Return: the length of how many bytes have been copied, excluding
* terminating '\0'.
*/
-static inline ssize_t
-cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
- loff_t off, size_t count)
+static __always_inline
+ssize_t cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
+ loff_t off, size_t count)
{
return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask),
nr_cpu_ids, off, count) - 1;
@@ -1242,9 +1254,9 @@ cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
* Return: the length of how many bytes have been copied, excluding
* terminating '\0'.
*/
-static inline ssize_t
-cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
- loff_t off, size_t count)
+static __always_inline
+ssize_t cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
+ loff_t off, size_t count)
{
return bitmap_print_list_to_buf(buf, cpumask_bits(mask),
nr_cpu_ids, off, count) - 1;
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index de4cf0ee96f7..835e7b793f6a 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -99,6 +99,7 @@ static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
const struct task_struct *tsk2);
+#ifdef CONFIG_CPUSETS_V1
#define cpuset_memory_pressure_bump() \
do { \
if (cpuset_memory_pressure_enabled) \
@@ -106,6 +107,9 @@ extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);
+#else
+static inline void cpuset_memory_pressure_bump(void) { }
+#endif
extern void cpuset_task_status_allowed(struct seq_file *m,
struct task_struct *task);
@@ -113,7 +117,6 @@ extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *tsk);
extern int cpuset_mem_spread_node(void);
-extern int cpuset_slab_spread_node(void);
static inline int cpuset_do_page_mem_spread(void)
{
@@ -246,11 +249,6 @@ static inline int cpuset_mem_spread_node(void)
return 0;
}
-static inline int cpuset_slab_spread_node(void)
-{
- return 0;
-}
-
static inline int cpuset_do_page_mem_spread(void)
{
return 0;
diff --git a/include/linux/damon.h b/include/linux/damon.h
index 27c546bfc6d4..a67f2c4940e9 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -233,7 +233,6 @@ struct damos_quota {
unsigned long charge_addr_from;
/* For prioritization */
- unsigned long histogram[DAMOS_MAX_SCORE + 1];
unsigned int min_score;
/* For feedback loop */
@@ -630,6 +629,8 @@ struct damon_ctx {
unsigned long next_ops_update_sis;
/* for waiting until the execution of the kdamond_fn is started */
struct completion kdamond_started;
+ /* for scheme quotas prioritization */
+ unsigned long *regions_score_histogram;
/* public: */
struct task_struct *kdamond;
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index c9c65b132c0f..0928a6c8ae1e 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -57,7 +57,6 @@ static const struct file_operations __fops = { \
.release = simple_attr_release, \
.read = debugfs_attr_read, \
.write = (__is_signed) ? debugfs_attr_write_signed : debugfs_attr_write, \
- .llseek = no_llseek, \
}
#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
diff --git a/include/linux/decompress/unxz.h b/include/linux/decompress/unxz.h
index f764e2a7201e..3dd2658a9dab 100644
--- a/include/linux/decompress/unxz.h
+++ b/include/linux/decompress/unxz.h
@@ -1,10 +1,9 @@
+/* SPDX-License-Identifier: 0BSD */
+
/*
* Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#ifndef DECOMPRESS_UNXZ_H
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 53ca3a913d06..8321f65897f3 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -524,7 +524,6 @@ int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors);
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);
-union map_info *dm_get_rq_mapinfo(struct request *rq);
#ifdef CONFIG_BLK_DEV_ZONED
struct dm_report_zones_args {
diff --git a/include/linux/device.h b/include/linux/device.h
index 34eb20f5966f..b4bde8d22697 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -707,6 +707,8 @@ struct device_physical_location {
* for dma allocations. This flag is managed by the dma ops
* instance from ->dma_supported.
* @dma_skip_sync: DMA sync operations can be skipped for coherent buffers.
+ * @dma_iommu: Device is using the default IOMMU implementation for DMA and
+ * does not rely on the dma_ops structure.
*
* At the lowest level, every device in a Linux system is represented by an
* instance of struct device. The device structure contains the information
@@ -748,7 +750,7 @@ struct device {
struct dev_pin_info *pins;
#endif
struct dev_msi_info msi;
-#ifdef CONFIG_DMA_OPS
+#ifdef CONFIG_ARCH_HAS_DMA_OPS
const struct dma_map_ops *dma_ops;
#endif
u64 *dma_mask; /* dma mask (if dma'able device) */
@@ -822,6 +824,9 @@ struct device {
#ifdef CONFIG_DMA_NEED_SYNC
bool dma_skip_sync:1;
#endif
+#ifdef CONFIG_IOMMU_DMA
+ bool dma_iommu:1;
+#endif
};
/**
diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h
index 807831d6bf0f..cdc4757217f9 100644
--- a/include/linux/device/bus.h
+++ b/include/linux/device/bus.h
@@ -126,6 +126,9 @@ struct bus_attribute {
int __must_check bus_create_file(const struct bus_type *bus, struct bus_attribute *attr);
void bus_remove_file(const struct bus_type *bus, struct bus_attribute *attr);
+/* Matching function type for drivers/base APIs to find a specific device */
+typedef int (*device_match_t)(struct device *dev, const void *data);
+
/* Generic device matching functions that all busses can use to match with */
int device_match_name(struct device *dev, const void *name);
int device_match_of_node(struct device *dev, const void *np);
@@ -139,8 +142,7 @@ int device_match_any(struct device *dev, const void *unused);
int bus_for_each_dev(const struct bus_type *bus, struct device *start, void *data,
int (*fn)(struct device *dev, void *data));
struct device *bus_find_device(const struct bus_type *bus, struct device *start,
- const void *data,
- int (*match)(struct device *dev, const void *data));
+ const void *data, device_match_t match);
/**
* bus_find_device_by_name - device iterator for locating a particular device
* of a specific name.
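
With the device_match_t typedef in place, any of the generic matchers (or a custom function with the same signature) can be passed directly; a usage sketch with an arbitrary bus and a made-up device name:

    struct device *dev;

    dev = bus_find_device(&platform_bus_type, NULL, "soc", device_match_name);
    if (dev) {
            /* ... use dev ... */
            put_device(dev);        /* bus_find_device() takes a reference */
    }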
diff --git a/include/linux/device/class.h b/include/linux/device/class.h
index c576b49c55c2..518c9c83d64b 100644
--- a/include/linux/device/class.h
+++ b/include/linux/device/class.h
@@ -95,7 +95,7 @@ void class_dev_iter_exit(struct class_dev_iter *iter);
int class_for_each_device(const struct class *class, const struct device *start, void *data,
int (*fn)(struct device *dev, void *data));
struct device *class_find_device(const struct class *class, const struct device *start,
- const void *data, int (*match)(struct device *, const void *));
+ const void *data, device_match_t match);
/**
* class_find_device_by_name - device iterator for locating a particular device
diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h
index 1fc8b68786de..5c04b8e3833b 100644
--- a/include/linux/device/driver.h
+++ b/include/linux/device/driver.h
@@ -157,7 +157,7 @@ int __must_check driver_for_each_device(struct device_driver *drv, struct device
void *data, int (*fn)(struct device *dev, void *));
struct device *driver_find_device(const struct device_driver *drv,
struct device *start, const void *data,
- int (*match)(struct device *dev, const void *data));
+ device_match_t match);
/**
* driver_find_device_by_name - device iterator for locating a particular device
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index edbe13d00776..d7e30d4f7503 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -12,7 +12,7 @@
#include <linux/mem_encrypt.h>
#include <linux/swiotlb.h>
-extern unsigned int zone_dma_bits;
+extern u64 zone_dma_limit;
/*
* Record the mapping of CPU physical to DMA addresses for a given region.
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h
index 29c5650c1038..079b3dec0a16 100644
--- a/include/linux/dma-fence-array.h
+++ b/include/linux/dma-fence-array.h
@@ -79,6 +79,12 @@ to_dma_fence_array(struct dma_fence *fence)
for (index = 0, fence = dma_fence_array_first(head); fence; \
++(index), fence = dma_fence_array_next(head, index))
+struct dma_fence_array *dma_fence_array_alloc(int num_fences);
+void dma_fence_array_init(struct dma_fence_array *array,
+ int num_fences, struct dma_fence **fences,
+ u64 context, unsigned seqno,
+ bool signal_on_any);
+
struct dma_fence_array *dma_fence_array_create(int num_fences,
struct dma_fence **fences,
u64 context, unsigned seqno,
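
The new alloc/init split presumably lets callers separate allocation from final initialization; a rough sketch against the declared prototypes, where num_fences, fences, context and seqno are placeholders:

    struct dma_fence_array *array;

    array = dma_fence_array_alloc(num_fences);
    if (!array)
            return -ENOMEM;
    /* ... fill the fences[] array of dma_fence pointers ... */
    dma_fence_array_init(array, num_fences, fences, context, seqno, false);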
diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h
index 064bad725061..27d15f60950a 100644
--- a/include/linux/dma-heap.h
+++ b/include/linux/dma-heap.h
@@ -9,14 +9,13 @@
#ifndef _DMA_HEAPS_H
#define _DMA_HEAPS_H
-#include <linux/cdev.h>
#include <linux/types.h>
struct dma_heap;
/**
* struct dma_heap_ops - ops to operate on a given heap
- * @allocate: allocate dmabuf and return struct dma_buf ptr
+ * @allocate: allocate dmabuf and return struct dma_buf ptr
*
* allocate returns dmabuf on success, ERR_PTR(-errno) on error.
*/
@@ -41,28 +40,10 @@ struct dma_heap_export_info {
void *priv;
};
-/**
- * dma_heap_get_drvdata() - get per-heap driver data
- * @heap: DMA-Heap to retrieve private data for
- *
- * Returns:
- * The per-heap data for the heap.
- */
void *dma_heap_get_drvdata(struct dma_heap *heap);
-/**
- * dma_heap_get_name() - get heap name
- * @heap: DMA-Heap to retrieve private data for
- *
- * Returns:
- * The char* for the heap name.
- */
const char *dma_heap_get_name(struct dma_heap *heap);
-/**
- * dma_heap_add - adds a heap to dmabuf heaps
- * @exp_info: information needed to register this heap
- */
struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info);
#endif /* _DMA_HEAPS_H */
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index 02a1c825896b..b7773201414c 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -13,20 +13,7 @@
struct cma;
struct iommu_ops;
-/*
- * Values for struct dma_map_ops.flags:
- *
- * DMA_F_PCI_P2PDMA_SUPPORTED: Indicates the dma_map_ops implementation can
- * handle PCI P2PDMA pages in the map_sg/unmap_sg operation.
- * DMA_F_CAN_SKIP_SYNC: DMA sync operations can be skipped if the device is
- * coherent and it's not an SWIOTLB buffer.
- */
-#define DMA_F_PCI_P2PDMA_SUPPORTED (1 << 0)
-#define DMA_F_CAN_SKIP_SYNC (1 << 1)
-
struct dma_map_ops {
- unsigned int flags;
-
void *(*alloc)(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
unsigned long attrs);
@@ -37,11 +24,6 @@ struct dma_map_ops {
gfp_t gfp);
void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
dma_addr_t dma_handle, enum dma_data_direction dir);
- struct sg_table *(*alloc_noncontiguous)(struct device *dev, size_t size,
- enum dma_data_direction dir, gfp_t gfp,
- unsigned long attrs);
- void (*free_noncontiguous)(struct device *dev, size_t size,
- struct sg_table *sgt, enum dma_data_direction dir);
int (*mmap)(struct device *, struct vm_area_struct *,
void *, dma_addr_t, size_t, unsigned long attrs);
@@ -88,7 +70,7 @@ struct dma_map_ops {
unsigned long (*get_merge_boundary)(struct device *dev);
};
-#ifdef CONFIG_DMA_OPS
+#ifdef CONFIG_ARCH_HAS_DMA_OPS
#include <asm/dma-mapping.h>
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
@@ -103,7 +85,7 @@ static inline void set_dma_ops(struct device *dev,
{
dev->dma_ops = dma_ops;
}
-#else /* CONFIG_DMA_OPS */
+#else /* CONFIG_ARCH_HAS_DMA_OPS */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
return NULL;
@@ -112,7 +94,7 @@ static inline void set_dma_ops(struct device *dev,
const struct dma_map_ops *dma_ops)
{
}
-#endif /* CONFIG_DMA_OPS */
+#endif /* CONFIG_ARCH_HAS_DMA_OPS */
#ifdef CONFIG_DMA_CMA
extern struct cma *dma_contiguous_default_area;
@@ -219,20 +201,6 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
}
#endif /* CONFIG_DMA_GLOBAL_POOL */
-/*
- * This is the actual return value from the ->alloc_noncontiguous method.
- * The users of the DMA API should only care about the sg_table, but to make
- * the DMA-API internal vmaping and freeing easier we stash away the page
- * array as well (except for the fallback case). This can go away any time,
- * e.g. when a vmap-variant that takes a scatterlist comes along.
- */
-struct dma_sgt_handle {
- struct sg_table sgt;
- struct page **pages;
-};
-#define sgt_handle(sgt) \
- container_of((sgt), struct dma_sgt_handle, sgt)
-
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index f693aafe221f..1524da363734 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -524,13 +524,11 @@ static inline unsigned int dma_get_max_seg_size(struct device *dev)
return SZ_64K;
}
-static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
+static inline void dma_set_max_seg_size(struct device *dev, unsigned int size)
{
- if (dev->dma_parms) {
- dev->dma_parms->max_segment_size = size;
- return 0;
- }
- return -EIO;
+ if (WARN_ON_ONCE(!dev->dma_parms))
+ return;
+ dev->dma_parms->max_segment_size = size;
}
static inline unsigned long dma_get_seg_boundary(struct device *dev)
@@ -559,13 +557,11 @@ static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}
-static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
+static inline void dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
- if (dev->dma_parms) {
- dev->dma_parms->segment_boundary_mask = mask;
- return 0;
- }
- return -EIO;
+ if (WARN_ON_ONCE(!dev->dma_parms))
+ return;
+ dev->dma_parms->segment_boundary_mask = mask;
}
static inline unsigned int dma_get_min_align_mask(struct device *dev)
@@ -575,13 +571,12 @@ static inline unsigned int dma_get_min_align_mask(struct device *dev)
return 0;
}
-static inline int dma_set_min_align_mask(struct device *dev,
+static inline void dma_set_min_align_mask(struct device *dev,
unsigned int min_align_mask)
{
if (WARN_ON_ONCE(!dev->dma_parms))
- return -EIO;
+ return;
dev->dma_parms->min_align_mask = min_align_mask;
- return 0;
}
#ifndef dma_get_cache_alignment
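With the setters now returning void, callers no longer have an error to propagate; a missing dev->dma_parms is treated as a driver bug and reported via WARN_ON_ONCE() inside the helpers. A brief sketch of the resulting probe-path usage (foo_probe and the chosen limits are illustrative):

static int foo_probe(struct device *dev)
{
        /* fire-and-forget: no return value to check any more */
        dma_set_max_seg_size(dev, SZ_64K);
        dma_set_seg_boundary(dev, U32_MAX);
        dma_set_min_align_mask(dev, SZ_4K - 1);
        return 0;
}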
diff --git a/include/linux/dma/ipu-dma.h b/include/linux/dma/ipu-dma.h
deleted file mode 100644
index 6969391580d2..000000000000
--- a/include/linux/dma/ipu-dma.h
+++ /dev/null
@@ -1,174 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2008
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- *
- * Copyright (C) 2005-2007 Freescale Semiconductor, Inc.
- */
-
-#ifndef __LINUX_DMA_IPU_DMA_H
-#define __LINUX_DMA_IPU_DMA_H
-
-#include <linux/types.h>
-#include <linux/dmaengine.h>
-
-/* IPU DMA Controller channel definitions. */
-enum ipu_channel {
- IDMAC_IC_0 = 0, /* IC (encoding task) to memory */
- IDMAC_IC_1 = 1, /* IC (viewfinder task) to memory */
- IDMAC_ADC_0 = 1,
- IDMAC_IC_2 = 2,
- IDMAC_ADC_1 = 2,
- IDMAC_IC_3 = 3,
- IDMAC_IC_4 = 4,
- IDMAC_IC_5 = 5,
- IDMAC_IC_6 = 6,
- IDMAC_IC_7 = 7, /* IC (sensor data) to memory */
- IDMAC_IC_8 = 8,
- IDMAC_IC_9 = 9,
- IDMAC_IC_10 = 10,
- IDMAC_IC_11 = 11,
- IDMAC_IC_12 = 12,
- IDMAC_IC_13 = 13,
- IDMAC_SDC_0 = 14, /* Background synchronous display data */
- IDMAC_SDC_1 = 15, /* Foreground data (overlay) */
- IDMAC_SDC_2 = 16,
- IDMAC_SDC_3 = 17,
- IDMAC_ADC_2 = 18,
- IDMAC_ADC_3 = 19,
- IDMAC_ADC_4 = 20,
- IDMAC_ADC_5 = 21,
- IDMAC_ADC_6 = 22,
- IDMAC_ADC_7 = 23,
- IDMAC_PF_0 = 24,
- IDMAC_PF_1 = 25,
- IDMAC_PF_2 = 26,
- IDMAC_PF_3 = 27,
- IDMAC_PF_4 = 28,
- IDMAC_PF_5 = 29,
- IDMAC_PF_6 = 30,
- IDMAC_PF_7 = 31,
-};
-
-/* Order significant! */
-enum ipu_channel_status {
- IPU_CHANNEL_FREE,
- IPU_CHANNEL_INITIALIZED,
- IPU_CHANNEL_READY,
- IPU_CHANNEL_ENABLED,
-};
-
-#define IPU_CHANNELS_NUM 32
-
-enum pixel_fmt {
- /* 1 byte */
- IPU_PIX_FMT_GENERIC,
- IPU_PIX_FMT_RGB332,
- IPU_PIX_FMT_YUV420P,
- IPU_PIX_FMT_YUV422P,
- IPU_PIX_FMT_YUV420P2,
- IPU_PIX_FMT_YVU422P,
- /* 2 bytes */
- IPU_PIX_FMT_RGB565,
- IPU_PIX_FMT_RGB666,
- IPU_PIX_FMT_BGR666,
- IPU_PIX_FMT_YUYV,
- IPU_PIX_FMT_UYVY,
- /* 3 bytes */
- IPU_PIX_FMT_RGB24,
- IPU_PIX_FMT_BGR24,
- /* 4 bytes */
- IPU_PIX_FMT_GENERIC_32,
- IPU_PIX_FMT_RGB32,
- IPU_PIX_FMT_BGR32,
- IPU_PIX_FMT_ABGR32,
- IPU_PIX_FMT_BGRA32,
- IPU_PIX_FMT_RGBA32,
-};
-
-enum ipu_color_space {
- IPU_COLORSPACE_RGB,
- IPU_COLORSPACE_YCBCR,
- IPU_COLORSPACE_YUV
-};
-
-/*
- * Enumeration of IPU rotation modes
- */
-enum ipu_rotate_mode {
- /* Note the enum values correspond to BAM value */
- IPU_ROTATE_NONE = 0,
- IPU_ROTATE_VERT_FLIP = 1,
- IPU_ROTATE_HORIZ_FLIP = 2,
- IPU_ROTATE_180 = 3,
- IPU_ROTATE_90_RIGHT = 4,
- IPU_ROTATE_90_RIGHT_VFLIP = 5,
- IPU_ROTATE_90_RIGHT_HFLIP = 6,
- IPU_ROTATE_90_LEFT = 7,
-};
-
-/*
- * Enumeration of DI ports for ADC.
- */
-enum display_port {
- DISP0,
- DISP1,
- DISP2,
- DISP3
-};
-
-struct idmac_video_param {
- unsigned short in_width;
- unsigned short in_height;
- uint32_t in_pixel_fmt;
- unsigned short out_width;
- unsigned short out_height;
- uint32_t out_pixel_fmt;
- unsigned short out_stride;
- bool graphics_combine_en;
- bool global_alpha_en;
- bool key_color_en;
- enum display_port disp;
- unsigned short out_left;
- unsigned short out_top;
-};
-
-/*
- * Union of initialization parameters for a logical channel. So far only video
- * parameters are used.
- */
-union ipu_channel_param {
- struct idmac_video_param video;
-};
-
-struct idmac_tx_desc {
- struct dma_async_tx_descriptor txd;
- struct scatterlist *sg; /* scatterlist for this */
- unsigned int sg_len; /* tx-descriptor. */
- struct list_head list;
-};
-
-struct idmac_channel {
- struct dma_chan dma_chan;
- dma_cookie_t completed; /* last completed cookie */
- union ipu_channel_param params;
- enum ipu_channel link; /* input channel, linked to the output */
- enum ipu_channel_status status;
- void *client; /* Only one client per channel */
- unsigned int n_tx_desc;
- struct idmac_tx_desc *desc; /* allocated tx-descriptors */
- struct scatterlist *sg[2]; /* scatterlist elements in buffer-0 and -1 */
- struct list_head free_list; /* free tx-descriptors */
- struct list_head queue; /* queued tx-descriptors */
- spinlock_t lock; /* protects sg[0,1], queue */
- struct mutex chan_mutex; /* protects status, cookie, free_list */
- bool sec_chan_en;
- int active_buffer;
- unsigned int eof_irq;
- char eof_name[16]; /* EOF IRQ name for request_irq() */
-};
-
-#define to_tx_desc(tx) container_of(tx, struct idmac_tx_desc, txd)
-#define to_idmac_chan(c) container_of(c, struct idmac_channel, dma_chan)
-
-#endif /* __LINUX_DMA_IPU_DMA_H */
diff --git a/include/linux/dma/k3-udma-glue.h b/include/linux/dma/k3-udma-glue.h
index 1e491c5dcac2..2dea217629d0 100644
--- a/include/linux/dma/k3-udma-glue.h
+++ b/include/linux/dma/k3-udma-glue.h
@@ -136,8 +136,6 @@ u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn);
int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_num);
-void k3_udma_glue_rx_put_irq(struct k3_udma_glue_rx_channel *rx_chn,
- u32 flow_num);
void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_num, void *data,
void (*cleanup)(void *data, dma_addr_t desc_dma),
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 6bf3c4fe8511..e28d88066033 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -764,8 +764,6 @@ extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
extern int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
extern void efi_mem_reserve(phys_addr_t addr, u64 size);
extern int efi_mem_reserve_persistent(phys_addr_t addr, u64 size);
-extern void efi_initialize_iomem_resources(struct resource *code_resource,
- struct resource *data_resource, struct resource *bss_resource);
extern u64 efi_get_fdt_params(struct efi_memory_map_data *data);
extern struct kobject *efi_kobj;
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index b0fb775a600d..1e50cdb83ae5 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -108,7 +108,7 @@ static __always_inline void enter_from_user_mode(struct pt_regs *regs)
arch_enter_from_user_mode(regs);
lockdep_hardirqs_off(CALLER_ADDR0);
- CT_WARN_ON(__ct_state() != CONTEXT_USER);
+ CT_WARN_ON(__ct_state() != CT_STATE_USER);
user_exit_irqoff();
instrumentation_begin();
diff --git a/include/linux/err.h b/include/linux/err.h
index b5d9bb2a2349..a4dacd745fcf 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -41,6 +41,9 @@ static inline void * __must_check ERR_PTR(long error)
return (void *) error;
}
+/* Return the pointer in the percpu address space. */
+#define ERR_PTR_PCPU(error) ((void __percpu *)(unsigned long)ERR_PTR(error))
+
/**
* PTR_ERR - Extract the error code from an error pointer.
* @ptr: An error pointer.
@@ -51,6 +54,9 @@ static inline long __must_check PTR_ERR(__force const void *ptr)
return (long) ptr;
}
+/* Read an error pointer from the percpu address space. */
+#define PTR_ERR_PCPU(ptr) (PTR_ERR((const void *)(__force const unsigned long)(ptr)))
+
/**
* IS_ERR - Detect an error pointer.
* @ptr: The pointer to check.
@@ -61,6 +67,9 @@ static inline bool __must_check IS_ERR(__force const void *ptr)
return IS_ERR_VALUE((unsigned long)ptr);
}
+/* Read an error pointer from the percpu address space. */
+#define IS_ERR_PCPU(ptr) (IS_ERR((const void *)(__force const unsigned long)(ptr)))
+
/**
* IS_ERR_OR_NULL - Detect an error pointer or a null pointer.
* @ptr: The pointer to check.
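A small sketch of the calling convention the new percpu error-pointer helpers are meant for; counters_alloc() and struct my_stats are hypothetical wrappers:

static u64 __percpu *counters_alloc(void)
{
        u64 __percpu *p = alloc_percpu(u64);

        if (!p)
                return ERR_PTR_PCPU(-ENOMEM);   /* errno encoded in a __percpu pointer */
        return p;
}

static int counters_init(struct my_stats *s)
{
        s->counters = counters_alloc();
        if (IS_ERR_PCPU(s->counters))
                return PTR_ERR_PCPU(s->counters);       /* back to a plain errno */
        return 0;
}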
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 30114c25ad12..ecf203f01034 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -21,7 +21,7 @@
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/crc32.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <asm/bitsperlong.h>
#ifdef __KERNEL__
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 01bee2b289c2..b0b821edfd97 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -19,7 +19,6 @@
#define F2FS_BLKSIZE_BITS PAGE_SHIFT /* bits for F2FS_BLKSIZE */
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
#define F2FS_EXTENSION_LEN 8 /* max size of extension */
-#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
#define NULL_ADDR ((block_t)0) /* used as block_t addresses */
#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */
@@ -28,6 +27,7 @@
#define F2FS_BYTES_TO_BLK(bytes) ((bytes) >> F2FS_BLKSIZE_BITS)
#define F2FS_BLK_TO_BYTES(blk) ((blk) << F2FS_BLKSIZE_BITS)
#define F2FS_BLK_END_BYTES(blk) (F2FS_BLK_TO_BYTES(blk + 1) - 1)
+#define F2FS_BLK_ALIGN(x) (F2FS_BYTES_TO_BLK((x) + F2FS_BLKSIZE - 1))
/* 0, 1(node nid), 2(meta nid) are reserved node id */
#define F2FS_RESERVED_NODE_NUM 3
@@ -278,7 +278,7 @@ struct node_footer {
#define F2FS_INLINE_DATA 0x02 /* file inline data flag */
#define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */
#define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */
-#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */
+#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries (obsolete) */
#define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */
#define F2FS_PIN_FILE 0x40 /* file should not be gced */
#define F2FS_COMPRESS_RELEASED 0x80 /* file released compressed blocks */
diff --git a/include/linux/falloc.h b/include/linux/falloc.h
index f3f0b97b1675..3f49f3df6af5 100644
--- a/include/linux/falloc.h
+++ b/include/linux/falloc.h
@@ -25,12 +25,18 @@ struct space_resv {
#define FS_IOC_UNRESVSP64 _IOW('X', 43, struct space_resv)
#define FS_IOC_ZERO_RANGE _IOW('X', 57, struct space_resv)
-#define FALLOC_FL_SUPPORTED_MASK (FALLOC_FL_KEEP_SIZE | \
- FALLOC_FL_PUNCH_HOLE | \
- FALLOC_FL_COLLAPSE_RANGE | \
- FALLOC_FL_ZERO_RANGE | \
- FALLOC_FL_INSERT_RANGE | \
- FALLOC_FL_UNSHARE_RANGE)
+/*
+ * Mask of all supported fallocate modes. Only one can be set at a time.
+ *
+ * In addition to the mode bit, the mode argument can also encode flags.
+ * FALLOC_FL_KEEP_SIZE is the only supported flag so far.
+ */
+#define FALLOC_FL_MODE_MASK (FALLOC_FL_ALLOCATE_RANGE | \
+ FALLOC_FL_PUNCH_HOLE | \
+ FALLOC_FL_COLLAPSE_RANGE | \
+ FALLOC_FL_ZERO_RANGE | \
+ FALLOC_FL_INSERT_RANGE | \
+ FALLOC_FL_UNSHARE_RANGE)
/* on ia32 l_start is on a 32-bit boundary */
#if defined(CONFIG_X86_64)
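A hedged sketch of the kind of validation the renamed mask supports, following the comment above that FALLOC_FL_KEEP_SIZE is the only modifier flag and that only one mode may be set; check_fallocate_mode() is illustrative, not the VFS code itself:

static int check_fallocate_mode(int mode)
{
        int raw = mode & ~FALLOC_FL_KEEP_SIZE;  /* strip the only flag bit */

        if (raw & ~FALLOC_FL_MODE_MASK)
                return -EOPNOTSUPP;             /* unknown bits */
        if (hweight32(raw) > 1)
                return -EINVAL;                 /* modes are mutually exclusive */
        return 0;
}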
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index 354413950d34..8c829d28dcf3 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -2,13 +2,17 @@
#ifndef _LINUX_FAULT_INJECT_H
#define _LINUX_FAULT_INJECT_H
+#include <linux/err.h>
+#include <linux/types.h>
+
+struct dentry;
+struct kmem_cache;
+
#ifdef CONFIG_FAULT_INJECTION
-#include <linux/types.h>
-#include <linux/debugfs.h>
+#include <linux/atomic.h>
#include <linux/configfs.h>
#include <linux/ratelimit.h>
-#include <linux/atomic.h>
/*
* For explanation of the elements of this struct, see
@@ -51,6 +55,28 @@ int setup_fault_attr(struct fault_attr *attr, char *str);
bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags);
bool should_fail(struct fault_attr *attr, ssize_t size);
+#else /* CONFIG_FAULT_INJECTION */
+
+struct fault_attr {
+};
+
+#define DECLARE_FAULT_ATTR(name) struct fault_attr name = {}
+
+static inline int setup_fault_attr(struct fault_attr *attr, char *str)
+{
+ return 0; /* Note: 0 means error for __setup() handlers! */
+}
+static inline bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags)
+{
+ return false;
+}
+static inline bool should_fail(struct fault_attr *attr, ssize_t size)
+{
+ return false;
+}
+
+#endif /* CONFIG_FAULT_INJECTION */
+
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
struct dentry *fault_create_debugfs_attr(const char *name,
@@ -87,10 +113,6 @@ static inline void fault_config_init(struct fault_config *config,
#endif /* CONFIG_FAULT_INJECTION_CONFIGFS */
-#endif /* CONFIG_FAULT_INJECTION */
-
-struct kmem_cache;
-
#ifdef CONFIG_FAIL_PAGE_ALLOC
bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
#else
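With the !CONFIG_FAULT_INJECTION stubs now supplied by the header, call sites can drop their own #ifdef guards. A minimal sketch; fail_my_io, struct my_dev and do_submit() are illustrative:

static DECLARE_FAULT_ATTR(fail_my_io);

static int my_submit(struct my_dev *d)
{
        /* compiles down to "return false" when fault injection is disabled */
        if (should_fail(&fail_my_io, 1))
                return -EIO;
        return do_submit(d);
}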
diff --git a/include/linux/fb.h b/include/linux/fb.h
index db7d97b10964..267b59ead432 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -510,6 +510,7 @@ struct fb_info {
void *par;
bool skip_vt_switch; /* no VT switch on suspend/resume required */
+ bool skip_panic; /* Do not write to the fb after a panic */
};
/* This will go away
@@ -601,6 +602,7 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
/* fbmem.c */
extern int register_framebuffer(struct fb_info *fb_info);
extern void unregister_framebuffer(struct fb_info *fb_info);
+extern int devm_register_framebuffer(struct device *dev, struct fb_info *fb_info);
extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size);
extern void fb_pad_unaligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 idx,
u32 height, u32 shift_high, u32 shift_low, u32 mod);
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 2944d4aa413b..b1c5722f2b3c 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -22,7 +22,6 @@
* as this is the granularity returned by copy_fdset().
*/
#define NR_OPEN_DEFAULT BITS_PER_LONG
-#define NR_OPEN_MAX ~0U
struct fdtable {
unsigned int max_fds;
@@ -106,7 +105,10 @@ struct task_struct;
void put_files_struct(struct files_struct *fs);
int unshare_files(void);
-struct files_struct *dup_fd(struct files_struct *, unsigned, int *) __latent_entropy;
+struct fd_range {
+ unsigned int from, to;
+};
+struct files_struct *dup_fd(struct files_struct *, struct fd_range *) __latent_entropy;
void do_close_on_exec(struct files_struct *);
int iterate_fd(struct files_struct *, unsigned,
int (*)(const void *, struct file *, unsigned),
@@ -115,8 +117,6 @@ int iterate_fd(struct files_struct *, unsigned,
extern int close_fd(unsigned int fd);
extern int __close_range(unsigned int fd, unsigned int max_fd, unsigned int flags);
extern struct file *file_close_fd(unsigned int fd);
-extern int unshare_fd(unsigned long unshare_flags, unsigned int max_fds,
- struct files_struct **new_fdp);
extern struct kmem_cache *files_cachep;
diff --git a/include/linux/file.h b/include/linux/file.h
index 59b146a14dca..f98de143245a 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -11,6 +11,7 @@
#include <linux/posix_types.h>
#include <linux/errno.h>
#include <linux/cleanup.h>
+#include <linux/err.h>
struct file;
@@ -35,51 +36,52 @@ static inline void fput_light(struct file *file, int fput_needed)
fput(file);
}
+/* either a reference to struct file + flags
+ * (cloned vs. borrowed, pos locked), with
+ * flags stored in lower bits of value,
+ * or empty (represented by 0).
+ */
struct fd {
- struct file *file;
- unsigned int flags;
+ unsigned long word;
};
#define FDPUT_FPUT 1
#define FDPUT_POS_UNLOCK 2
-static inline void fdput(struct fd fd)
+#define fd_file(f) ((struct file *)((f).word & ~(FDPUT_FPUT|FDPUT_POS_UNLOCK)))
+static inline bool fd_empty(struct fd f)
{
- if (fd.flags & FDPUT_FPUT)
- fput(fd.file);
+ return unlikely(!f.word);
}
-extern struct file *fget(unsigned int fd);
-extern struct file *fget_raw(unsigned int fd);
-extern struct file *fget_task(struct task_struct *task, unsigned int fd);
-extern unsigned long __fdget(unsigned int fd);
-extern unsigned long __fdget_raw(unsigned int fd);
-extern unsigned long __fdget_pos(unsigned int fd);
-extern void __f_unlock_pos(struct file *);
-
-static inline struct fd __to_fd(unsigned long v)
+#define EMPTY_FD (struct fd){0}
+static inline struct fd BORROWED_FD(struct file *f)
{
- return (struct fd){(struct file *)(v & ~3),v & 3};
+ return (struct fd){(unsigned long)f};
}
-
-static inline struct fd fdget(unsigned int fd)
+static inline struct fd CLONED_FD(struct file *f)
{
- return __to_fd(__fdget(fd));
+ return (struct fd){(unsigned long)f | FDPUT_FPUT};
}
-static inline struct fd fdget_raw(unsigned int fd)
+static inline void fdput(struct fd fd)
{
- return __to_fd(__fdget_raw(fd));
+ if (fd.word & FDPUT_FPUT)
+ fput(fd_file(fd));
}
-static inline struct fd fdget_pos(int fd)
-{
- return __to_fd(__fdget_pos(fd));
-}
+extern struct file *fget(unsigned int fd);
+extern struct file *fget_raw(unsigned int fd);
+extern struct file *fget_task(struct task_struct *task, unsigned int fd);
+extern void __f_unlock_pos(struct file *);
+
+struct fd fdget(unsigned int fd);
+struct fd fdget_raw(unsigned int fd);
+struct fd fdget_pos(unsigned int fd);
static inline void fdput_pos(struct fd f)
{
- if (f.flags & FDPUT_POS_UNLOCK)
- __f_unlock_pos(f.file);
+ if (f.word & FDPUT_POS_UNLOCK)
+ __f_unlock_pos(fd_file(f));
fdput(f);
}
@@ -96,6 +98,7 @@ extern void put_unused_fd(unsigned int fd);
DEFINE_CLASS(get_unused_fd, int, if (_T >= 0) put_unused_fd(_T),
get_unused_fd_flags(flags), unsigned flags)
+DEFINE_FREE(fput, struct file *, if (!IS_ERR_OR_NULL(_T)) fput(_T))
/*
* take_fd() will take care to set @fd to -EBADF ensuring that
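A sketch of how a syscall-style caller reads under the reworked struct fd, using only the accessors introduced above; fd_file(), fd_empty(), fdget() and fdput() come from the diff, while do_something() and vfs_do_work() are illustrative:

long do_something(unsigned int fd)
{
        struct fd f = fdget(fd);
        long ret;

        if (fd_empty(f))                /* no file behind this descriptor */
                return -EBADF;

        ret = vfs_do_work(fd_file(f));  /* struct file * unpacked from f.word */
        fdput(f);                       /* drops the reference only if FDPUT_FPUT is set */
        return ret;
}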
diff --git a/include/linux/filelock.h b/include/linux/filelock.h
index daee999d05f3..bb44224c6676 100644
--- a/include/linux/filelock.h
+++ b/include/linux/filelock.h
@@ -420,28 +420,38 @@ static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
#ifdef CONFIG_FILE_LOCKING
static inline int break_lease(struct inode *inode, unsigned int mode)
{
+ struct file_lock_context *flctx;
+
/*
* Since this check is lockless, we must ensure that any refcounts
* taken are done before checking i_flctx->flc_lease. Otherwise, we
* could end up racing with tasks trying to set a new lease on this
* file.
*/
+ flctx = READ_ONCE(inode->i_flctx);
+ if (!flctx)
+ return 0;
smp_mb();
- if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
+ if (!list_empty_careful(&flctx->flc_lease))
return __break_lease(inode, mode, FL_LEASE);
return 0;
}
static inline int break_deleg(struct inode *inode, unsigned int mode)
{
+ struct file_lock_context *flctx;
+
/*
* Since this check is lockless, we must ensure that any refcounts
* taken are done before checking i_flctx->flc_lease. Otherwise, we
* could end up racing with tasks trying to set a new lease on this
* file.
*/
+ flctx = READ_ONCE(inode->i_flctx);
+ if (!flctx)
+ return 0;
smp_mb();
- if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
+ if (!list_empty_careful(&flctx->flc_lease))
return __break_lease(inode, mode, FL_DELEG);
return 0;
}
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 64e1506fefb8..7d7578a8eac1 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -437,6 +437,16 @@ static inline bool insn_is_cast_user(const struct bpf_insn *insn)
.off = OFF, \
.imm = 0 })
+/* Unconditional jumps, gotol pc + imm32 */
+
+#define BPF_JMP32_A(IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_JA, \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = IMM })
+
/* Relative call */
#define BPF_CALL_REL(TGT) \
diff --git a/include/linux/find.h b/include/linux/find.h
index 5dfca4225fef..68685714bc18 100644
--- a/include/linux/find.h
+++ b/include/linux/find.h
@@ -52,7 +52,7 @@ unsigned long _find_next_bit_le(const unsigned long *addr, unsigned
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
@@ -81,7 +81,7 @@ unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_next_and_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size,
unsigned long offset)
@@ -112,7 +112,7 @@ unsigned long find_next_and_bit(const unsigned long *addr1,
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_next_andnot_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size,
unsigned long offset)
@@ -142,7 +142,7 @@ unsigned long find_next_andnot_bit(const unsigned long *addr1,
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_next_or_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size,
unsigned long offset)
@@ -171,7 +171,7 @@ unsigned long find_next_or_bit(const unsigned long *addr1,
* Returns the bit number of the next zero bit
* If no bits are zero, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
@@ -198,7 +198,7 @@ unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
* Returns the bit number of the first set bit.
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
if (small_const_nbits(size)) {
@@ -224,7 +224,7 @@ unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
* Returns the bit number of the N'th set bit.
* If no such, returns >= @size.
*/
-static inline
+static __always_inline
unsigned long find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n)
{
if (n >= size)
@@ -249,7 +249,7 @@ unsigned long find_nth_bit(const unsigned long *addr, unsigned long size, unsign
* Returns the bit number of the N'th set bit.
* If no such, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_nth_and_bit(const unsigned long *addr1, const unsigned long *addr2,
unsigned long size, unsigned long n)
{
@@ -276,7 +276,7 @@ unsigned long find_nth_and_bit(const unsigned long *addr1, const unsigned long *
* Returns the bit number of the N'th set bit.
* If no such, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
unsigned long size, unsigned long n)
{
@@ -332,7 +332,7 @@ unsigned long find_nth_and_andnot_bit(const unsigned long *addr1,
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_first_and_bit(const unsigned long *addr1,
const unsigned long *addr2,
unsigned long size)
@@ -357,7 +357,7 @@ unsigned long find_first_and_bit(const unsigned long *addr1,
* Returns the bit number for the first set bit
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_first_and_and_bit(const unsigned long *addr1,
const unsigned long *addr2,
const unsigned long *addr3,
@@ -381,7 +381,7 @@ unsigned long find_first_and_and_bit(const unsigned long *addr1,
* Returns the bit number of the first cleared bit.
* If no bits are zero, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
if (small_const_nbits(size)) {
@@ -402,7 +402,7 @@ unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
*
* Returns the bit number of the last set bit, or size.
*/
-static inline
+static __always_inline
unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
{
if (small_const_nbits(size)) {
@@ -425,7 +425,7 @@ unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
* Returns the bit number for the next set bit, or first set bit up to @offset
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_next_and_bit_wrap(const unsigned long *addr1,
const unsigned long *addr2,
unsigned long size, unsigned long offset)
@@ -448,7 +448,7 @@ unsigned long find_next_and_bit_wrap(const unsigned long *addr1,
* Returns the bit number for the next set bit, or first set bit up to @offset
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_next_bit_wrap(const unsigned long *addr,
unsigned long size, unsigned long offset)
{
@@ -465,7 +465,7 @@ unsigned long find_next_bit_wrap(const unsigned long *addr,
 * Helper for for_each_set_bit_wrap(). Make sure you're doing the right thing
* before using it alone.
*/
-static inline
+static __always_inline
unsigned long __for_each_wrap(const unsigned long *bitmap, unsigned long size,
unsigned long start, unsigned long n)
{
@@ -506,20 +506,20 @@ extern unsigned long find_next_clump8(unsigned long *clump,
#if defined(__LITTLE_ENDIAN)
-static inline unsigned long find_next_zero_bit_le(const void *addr,
- unsigned long size, unsigned long offset)
+static __always_inline
+unsigned long find_next_zero_bit_le(const void *addr, unsigned long size, unsigned long offset)
{
return find_next_zero_bit(addr, size, offset);
}
-static inline unsigned long find_next_bit_le(const void *addr,
- unsigned long size, unsigned long offset)
+static __always_inline
+unsigned long find_next_bit_le(const void *addr, unsigned long size, unsigned long offset)
{
return find_next_bit(addr, size, offset);
}
-static inline unsigned long find_first_zero_bit_le(const void *addr,
- unsigned long size)
+static __always_inline
+unsigned long find_first_zero_bit_le(const void *addr, unsigned long size)
{
return find_first_zero_bit(addr, size);
}
@@ -527,7 +527,7 @@ static inline unsigned long find_first_zero_bit_le(const void *addr,
#elif defined(__BIG_ENDIAN)
#ifndef find_next_zero_bit_le
-static inline
+static __always_inline
unsigned long find_next_zero_bit_le(const void *addr, unsigned
long size, unsigned long offset)
{
@@ -546,7 +546,7 @@ unsigned long find_next_zero_bit_le(const void *addr, unsigned
#endif
#ifndef find_first_zero_bit_le
-static inline
+static __always_inline
unsigned long find_first_zero_bit_le(const void *addr, unsigned long size)
{
if (small_const_nbits(size)) {
@@ -560,7 +560,7 @@ unsigned long find_first_zero_bit_le(const void *addr, unsigned long size)
#endif
#ifndef find_next_bit_le
-static inline
+static __always_inline
unsigned long find_next_bit_le(const void *addr, unsigned
long size, unsigned long offset)
{
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 1cca14cf5652..b632eec3ab52 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -134,6 +134,8 @@ struct fw_card {
__be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
__be32 maint_utility_register;
+
+ struct workqueue_struct *isoc_wq;
};
static inline struct fw_card *fw_card_get(struct fw_card *card)
@@ -509,6 +511,7 @@ union fw_iso_callback {
struct fw_iso_context {
struct fw_card *card;
+ struct work_struct work;
int type;
int channel;
int speed;
@@ -528,6 +531,25 @@ int fw_iso_context_queue(struct fw_iso_context *ctx,
unsigned long payload);
void fw_iso_context_queue_flush(struct fw_iso_context *ctx);
int fw_iso_context_flush_completions(struct fw_iso_context *ctx);
+
+/**
+ * fw_iso_context_schedule_flush_completions() - schedule work item to process isochronous context.
+ * @ctx: the isochronous context
+ *
+ * Schedule a work item on the workqueue to process the isochronous context. The registered callback
+ * function is called by the worker when a queued packet buffer with the interrupt flag is
+ * completed, either after transmission in the IT context or after being filled in the IR context.
+ * The callback function is also called when the header buffer in the context becomes full, If it
+ * is required to process the context in the current context, fw_iso_context_flush_completions() is
+ * available instead.
+ *
+ * Context: Any context.
+ */
+static inline void fw_iso_context_schedule_flush_completions(struct fw_iso_context *ctx)
+{
+ queue_work(ctx->card->isoc_wq, &ctx->work);
+}
+
int fw_iso_context_start(struct fw_iso_context *ctx,
int cycle, int sync, int tags);
int fw_iso_context_stop(struct fw_iso_context *ctx);
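A minimal sketch of deferring completion handling from interrupt context to the new per-card isoc_wq via the helper documented above; my_isoc_irq() and the surrounding driver structure are illustrative:

static irqreturn_t my_isoc_irq(int irq, void *data)
{
        struct fw_iso_context *ctx = data;

        /* safe from any context; the callback runs later on card->isoc_wq */
        fw_iso_context_schedule_flush_completions(ctx);
        return IRQ_HANDLED;
}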
diff --git a/include/linux/firmware/imx/sm.h b/include/linux/firmware/imx/sm.h
new file mode 100644
index 000000000000..9b85a3f028d1
--- /dev/null
+++ b/include/linux/firmware/imx/sm.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef _SCMI_IMX_H
+#define _SCMI_IMX_H
+
+#include <linux/bitfield.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+
+#define SCMI_IMX_CTRL_PDM_CLK_SEL 0 /* AON PDM clock sel */
+#define SCMI_IMX_CTRL_MQS1_SETTINGS 1 /* AON MQS settings */
+#define SCMI_IMX_CTRL_SAI1_MCLK 2 /* AON SAI1 MCLK */
+#define SCMI_IMX_CTRL_SAI3_MCLK 3 /* WAKE SAI3 MCLK */
+#define SCMI_IMX_CTRL_SAI4_MCLK 4 /* WAKE SAI4 MCLK */
+#define SCMI_IMX_CTRL_SAI5_MCLK 5 /* WAKE SAI5 MCLK */
+
+int scmi_imx_misc_ctrl_get(u32 id, u32 *num, u32 *val);
+int scmi_imx_misc_ctrl_set(u32 id, u32 val);
+
+#endif
diff --git a/include/linux/folio_queue.h b/include/linux/folio_queue.h
new file mode 100644
index 000000000000..3abe614ef5f0
--- /dev/null
+++ b/include/linux/folio_queue.h
@@ -0,0 +1,324 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Queue of folios definitions
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * See:
+ *
+ * Documentation/core-api/folio_queue.rst
+ *
+ * for a description of the API.
+ */
+
+#ifndef _LINUX_FOLIO_QUEUE_H
+#define _LINUX_FOLIO_QUEUE_H
+
+#include <linux/pagevec.h>
+
+/*
+ * Segment in a queue of running buffers. Each segment can hold a number of
+ * folios and a portion of the queue can be referenced with the ITER_FOLIOQ
+ * iterator. The possibility exists of inserting non-folio elements into the
+ * queue (such as gaps).
+ *
+ * Explicit prev and next pointers are used instead of a list_head to make it
+ * easier to add segments to tail and remove them from the head without the
+ * need for a lock.
+ */
+struct folio_queue {
+ struct folio_batch vec; /* Folios in the queue segment */
+ u8 orders[PAGEVEC_SIZE]; /* Order of each folio */
+ struct folio_queue *next; /* Next queue segment or NULL */
+ struct folio_queue *prev; /* Previous queue segment or NULL */
+ unsigned long marks; /* 1-bit mark per folio */
+ unsigned long marks2; /* Second 1-bit mark per folio */
+ unsigned long marks3; /* Third 1-bit mark per folio */
+#if PAGEVEC_SIZE > BITS_PER_LONG
+#error marks is not big enough
+#endif
+};
+
+/**
+ * folioq_init - Initialise a folio queue segment
+ * @folioq: The segment to initialise
+ *
+ * Initialise a folio queue segment. Note that the folio pointers are
+ * left uninitialised.
+ */
+static inline void folioq_init(struct folio_queue *folioq)
+{
+ folio_batch_init(&folioq->vec);
+ folioq->next = NULL;
+ folioq->prev = NULL;
+ folioq->marks = 0;
+ folioq->marks2 = 0;
+ folioq->marks3 = 0;
+}
+
+/**
+ * folioq_nr_slots: Query the capacity of a folio queue segment
+ * @folioq: The segment to query
+ *
+ * Query the number of folios that a particular folio queue segment might hold.
+ * [!] NOTE: This must not be assumed to be the same for every segment!
+ */
+static inline unsigned int folioq_nr_slots(const struct folio_queue *folioq)
+{
+ return PAGEVEC_SIZE;
+}
+
+/**
+ * folioq_count: Query the occupancy of a folio queue segment
+ * @folioq: The segment to query
+ *
+ * Query the number of folios that have been added to a folio queue segment.
+ * Note that this is not decreased as folios are removed from a segment.
+ */
+static inline unsigned int folioq_count(struct folio_queue *folioq)
+{
+ return folio_batch_count(&folioq->vec);
+}
+
+/**
+ * folioq_full: Query if a folio queue segment is full
+ * @folioq: The segment to query
+ *
+ * Query if a folio queue segment is fully occupied. Note that this does not
+ * change if folios are removed from a segment.
+ */
+static inline bool folioq_full(struct folio_queue *folioq)
+{
+ //return !folio_batch_space(&folioq->vec);
+ return folioq_count(folioq) >= folioq_nr_slots(folioq);
+}
+
+/**
+ * folioq_is_marked: Check first folio mark in a folio queue segment
+ * @folioq: The segment to query
+ * @slot: The slot number of the folio to query
+ *
+ * Determine if the first mark is set for the folio in the specified slot in a
+ * folio queue segment.
+ */
+static inline bool folioq_is_marked(const struct folio_queue *folioq, unsigned int slot)
+{
+ return test_bit(slot, &folioq->marks);
+}
+
+/**
+ * folioq_mark: Set the first mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Set the first mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
+static inline void folioq_mark(struct folio_queue *folioq, unsigned int slot)
+{
+ set_bit(slot, &folioq->marks);
+}
+
+/**
+ * folioq_unmark: Clear the first mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Clear the first mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
+static inline void folioq_unmark(struct folio_queue *folioq, unsigned int slot)
+{
+ clear_bit(slot, &folioq->marks);
+}
+
+/**
+ * folioq_is_marked2: Check second folio mark in a folio queue segment
+ * @folioq: The segment to query
+ * @slot: The slot number of the folio to query
+ *
+ * Determine if the second mark is set for the folio in the specified slot in a
+ * folio queue segment.
+ */
+static inline bool folioq_is_marked2(const struct folio_queue *folioq, unsigned int slot)
+{
+ return test_bit(slot, &folioq->marks2);
+}
+
+/**
+ * folioq_mark2: Set the second mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Set the second mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
+static inline void folioq_mark2(struct folio_queue *folioq, unsigned int slot)
+{
+ set_bit(slot, &folioq->marks2);
+}
+
+/**
+ * folioq_unmark2: Clear the second mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Clear the second mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
+static inline void folioq_unmark2(struct folio_queue *folioq, unsigned int slot)
+{
+ clear_bit(slot, &folioq->marks2);
+}
+
+/**
+ * folioq_is_marked3: Check third folio mark in a folio queue segment
+ * @folioq: The segment to query
+ * @slot: The slot number of the folio to query
+ *
+ * Determine if the third mark is set for the folio in the specified slot in a
+ * folio queue segment.
+ */
+static inline bool folioq_is_marked3(const struct folio_queue *folioq, unsigned int slot)
+{
+ return test_bit(slot, &folioq->marks3);
+}
+
+/**
+ * folioq_mark3: Set the third mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Set the third mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
+static inline void folioq_mark3(struct folio_queue *folioq, unsigned int slot)
+{
+ set_bit(slot, &folioq->marks3);
+}
+
+/**
+ * folioq_unmark3: Clear the third mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Clear the third mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
+static inline void folioq_unmark3(struct folio_queue *folioq, unsigned int slot)
+{
+ clear_bit(slot, &folioq->marks3);
+}
+
+static inline unsigned int __folio_order(struct folio *folio)
+{
+ if (!folio_test_large(folio))
+ return 0;
+ return folio->_flags_1 & 0xff;
+}
+
+/**
+ * folioq_append: Add a folio to a folio queue segment
+ * @folioq: The segment to add to
+ * @folio: The folio to add
+ *
+ * Add a folio to the tail of the sequence in a folio queue segment, increasing
+ * the occupancy count and returning the slot number for the folio just added.
+ * The folio size is extracted and stored in the queue and the marks are left
+ * unmodified.
+ *
+ * Note that it's left up to the caller to check that the segment capacity will
+ * not be exceeded and to extend the queue.
+ */
+static inline unsigned int folioq_append(struct folio_queue *folioq, struct folio *folio)
+{
+ unsigned int slot = folioq->vec.nr++;
+
+ folioq->vec.folios[slot] = folio;
+ folioq->orders[slot] = __folio_order(folio);
+ return slot;
+}
+
+/**
+ * folioq_append_mark: Add a folio to a folio queue segment
+ * @folioq: The segment to add to
+ * @folio: The folio to add
+ *
+ * Add a folio to the tail of the sequence in a folio queue segment, increasing
+ * the occupancy count and returning the slot number for the folio just added.
+ * The folio size is extracted and stored in the queue, the first mark is set
+ * and the second and third marks are left unmodified.
+ *
+ * Note that it's left up to the caller to check that the segment capacity will
+ * not be exceeded and to extend the queue.
+ */
+static inline unsigned int folioq_append_mark(struct folio_queue *folioq, struct folio *folio)
+{
+ unsigned int slot = folioq->vec.nr++;
+
+ folioq->vec.folios[slot] = folio;
+ folioq->orders[slot] = __folio_order(folio);
+ folioq_mark(folioq, slot);
+ return slot;
+}
+
+/**
+ * folioq_folio: Get a folio from a folio queue segment
+ * @folioq: The segment to access
+ * @slot: The folio slot to access
+ *
+ * Retrieve the folio in the specified slot from a folio queue segment. Note
+ * that no bounds check is made and if the slot hasn't been added into yet, the
+ * pointer will be undefined. If the slot has been cleared, NULL will be
+ * returned.
+ */
+static inline struct folio *folioq_folio(const struct folio_queue *folioq, unsigned int slot)
+{
+ return folioq->vec.folios[slot];
+}
+
+/**
+ * folioq_folio_order: Get the order of a folio from a folio queue segment
+ * @folioq: The segment to access
+ * @slot: The folio slot to access
+ *
+ * Retrieve the order of the folio in the specified slot from a folio queue
+ * segment. Note that no bounds check is made and if the slot hasn't been
+ * added into yet, the order returned will be 0.
+ */
+static inline unsigned int folioq_folio_order(const struct folio_queue *folioq, unsigned int slot)
+{
+ return folioq->orders[slot];
+}
+
+/**
+ * folioq_folio_size: Get the size of a folio from a folio queue segment
+ * @folioq: The segment to access
+ * @slot: The folio slot to access
+ *
+ * Retrieve the size of the folio in the specified slot from a folio queue
+ * segment. Note that no bounds check is made and if the slot hasn't been
+ * added into yet, the size returned will be PAGE_SIZE.
+ */
+static inline size_t folioq_folio_size(const struct folio_queue *folioq, unsigned int slot)
+{
+ return PAGE_SIZE << folioq_folio_order(folioq, slot);
+}
+
+/**
+ * folioq_clear: Clear a folio from a folio queue segment
+ * @folioq: The segment to clear
+ * @slot: The folio slot to clear
+ *
+ * Clear a folio from a sequence in a folio queue segment and clear its marks.
+ * The occupancy count is left unchanged.
+ */
+static inline void folioq_clear(struct folio_queue *folioq, unsigned int slot)
+{
+ folioq->vec.folios[slot] = NULL;
+ folioq_unmark(folioq, slot);
+ folioq_unmark2(folioq, slot);
+ folioq_unmark3(folioq, slot);
+}
+
+#endif /* _LINUX_FOLIO_QUEUE_H */
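Putting the accessors together, a rough sketch of filling one queue segment and walking it again; fill_and_drain(), get_next_folio() and consume() are hypothetical, and allocation of the segment itself is left to the caller as the header requires:

static int fill_and_drain(struct folio_queue *fq)
{
        struct folio *folio;
        unsigned int slot;

        folioq_init(fq);
        while (!folioq_full(fq) && (folio = get_next_folio()) != NULL)
                folioq_append(fq, folio);       /* records the folio and its order */

        for (slot = 0; slot < folioq_count(fq); slot++) {
                consume(folioq_folio(fq, slot),
                        folioq_folio_size(fq, slot));
                folioq_clear(fq, slot);         /* drops the pointer and all three marks */
        }
        return 0;
}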
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 6ca11e241a24..3559446279c1 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -146,8 +146,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/* Expect random access pattern */
#define FMODE_RANDOM ((__force fmode_t)(1 << 12))
-/* File is huge (eg. /dev/mem): treat loff_t as unsigned */
-#define FMODE_UNSIGNED_OFFSET ((__force fmode_t)(1 << 13))
+/* FMODE_* bit 13 */
/* File is opened with O_PATH; almost nothing can be done with it */
#define FMODE_PATH ((__force fmode_t)(1 << 14))
@@ -409,10 +408,10 @@ struct address_space_operations {
int (*write_begin)(struct file *, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata);
+ struct folio **foliop, void **fsdata);
int (*write_end)(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata);
+ struct folio *folio, void *fsdata);
/* Unfortunately this kludge is needed for FIBMAP. Don't use it */
sector_t (*bmap)(struct address_space *, sector_t);
@@ -683,7 +682,8 @@ struct inode {
#endif
/* Misc */
- unsigned long i_state;
+ u32 i_state;
+ /* 32-bit hole */
struct rw_semaphore i_rwsem;
unsigned long dirtied_when; /* jiffies of first dirtying */
@@ -746,6 +746,21 @@ struct inode {
void *i_private; /* fs or device private pointer */
} __randomize_layout;
+/*
+ * Get bit address from inode->i_state to use with wait_var_event()
+ * infrastructure.
+ */
+#define inode_state_wait_address(inode, bit) ((char *)&(inode)->i_state + (bit))
+
+struct wait_queue_head *inode_bit_waitqueue(struct wait_bit_queue_entry *wqe,
+ struct inode *inode, u32 bit);
+
+static inline void inode_wake_up_bit(struct inode *inode, u32 bit)
+{
+ /* Caller is responsible for correct memory barriers. */
+ wake_up_var(inode_state_wait_address(inode, bit));
+}
+
struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode);
static inline unsigned int i_blocksize(const struct inode *node)
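A hedged sketch of the waker side implied by the new helpers: clear the state bit under i_lock, then wake anyone sleeping on the derived address. The barrier placement follows the comment in inode_wake_up_bit(); my_clear_sync() is illustrative:

static void my_clear_sync(struct inode *inode)
{
        spin_lock(&inode->i_lock);
        inode->i_state &= ~I_SYNC;
        /* pairs with the waiter's barrier before re-checking i_state */
        smp_mb();
        inode_wake_up_bit(inode, __I_SYNC);
        spin_unlock(&inode->i_lock);
}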
@@ -948,6 +963,7 @@ static inline unsigned imajor(const struct inode *inode)
}
struct fown_struct {
+ struct file *file; /* backpointer for security modules */
rwlock_t lock; /* protects pid, uid, euid fields */
struct pid *pid; /* pid or -pgrp where SIGIO should be sent */
enum pid_type pid_type; /* Kind of process group SIGIO should be sent to */
@@ -987,52 +1003,69 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
index < ra->start + ra->size);
}
-/*
- * f_{lock,count,pos_lock} members can be highly contended and share
- * the same cacheline. f_{lock,mode} are very frequently used together
- * and so share the same cacheline as well. The read-mostly
- * f_{path,inode,op} are kept on a separate cacheline.
+/**
+ * struct file - Represents a file
+ * @f_count: reference count
+ * @f_lock: Protects f_ep, f_flags. Must not be taken from IRQ context.
+ * @f_mode: FMODE_* flags often used in hotpaths
+ * @f_op: file operations
+ * @f_mapping: Contents of a cacheable, mappable object.
+ * @private_data: filesystem or driver specific data
+ * @f_inode: cached inode
+ * @f_flags: file flags
+ * @f_iocb_flags: iocb flags
+ * @f_cred: stashed credentials of creator/opener
+ * @f_path: path of the file
+ * @f_pos_lock: lock protecting file position
+ * @f_pipe: specific to pipes
+ * @f_pos: file position
+ * @f_security: LSM security context of this file
+ * @f_owner: file owner
+ * @f_wb_err: writeback error
+ * @f_sb_err: per sb writeback errors
+ * @f_ep: link of all epoll hooks for this file
+ * @f_task_work: task work entry point
+ * @f_llist: work queue entrypoint
+ * @f_ra: file's readahead state
+ * @f_freeptr: Pointer used by SLAB_TYPESAFE_BY_RCU file cache (don't touch.)
*/
struct file {
+ atomic_long_t f_count;
+ spinlock_t f_lock;
+ fmode_t f_mode;
+ const struct file_operations *f_op;
+ struct address_space *f_mapping;
+ void *private_data;
+ struct inode *f_inode;
+ unsigned int f_flags;
+ unsigned int f_iocb_flags;
+ const struct cred *f_cred;
+ /* --- cacheline 1 boundary (64 bytes) --- */
+ struct path f_path;
union {
- /* fput() uses task work when closing and freeing file (default). */
- struct callback_head f_task_work;
- /* fput() must use workqueue (most kernel threads). */
- struct llist_node f_llist;
- unsigned int f_iocb_flags;
+ /* regular files (with FMODE_ATOMIC_POS) and directories */
+ struct mutex f_pos_lock;
+ /* pipes */
+ u64 f_pipe;
};
-
- /*
- * Protects f_ep, f_flags.
- * Must not be taken from IRQ context.
- */
- spinlock_t f_lock;
- fmode_t f_mode;
- atomic_long_t f_count;
- struct mutex f_pos_lock;
- loff_t f_pos;
- unsigned int f_flags;
- struct fown_struct f_owner;
- const struct cred *f_cred;
- struct file_ra_state f_ra;
- struct path f_path;
- struct inode *f_inode; /* cached value */
- const struct file_operations *f_op;
-
- u64 f_version;
+ loff_t f_pos;
#ifdef CONFIG_SECURITY
- void *f_security;
+ void *f_security;
#endif
- /* needed for tty driver, and maybe others */
- void *private_data;
-
+ /* --- cacheline 2 boundary (128 bytes) --- */
+ struct fown_struct *f_owner;
+ errseq_t f_wb_err;
+ errseq_t f_sb_err;
#ifdef CONFIG_EPOLL
- /* Used by fs/eventpoll.c to link all the hooks to this file */
- struct hlist_head *f_ep;
-#endif /* #ifdef CONFIG_EPOLL */
- struct address_space *f_mapping;
- errseq_t f_wb_err;
- errseq_t f_sb_err; /* for syncfs */
+ struct hlist_head *f_ep;
+#endif
+ union {
+ struct callback_head f_task_work;
+ struct llist_node f_llist;
+ struct file_ra_state f_ra;
+ freeptr_t f_freeptr;
+ };
+ /* --- cacheline 3 boundary (192 bytes) --- */
} __randomize_layout
__attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
@@ -1077,6 +1110,12 @@ struct file_lease;
#define OFFT_OFFSET_MAX type_max(off_t)
#endif
+int file_f_owner_allocate(struct file *file);
+static inline struct fown_struct *file_f_owner(const struct file *file)
+{
+ return READ_ONCE(file->f_owner);
+}
+
extern void send_sigio(struct fown_struct *fown, int fd, int band);
static inline struct inode *file_inode(const struct file *f)
@@ -1125,7 +1164,7 @@ extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force
extern int f_setown(struct file *filp, int who, int force);
extern void f_delown(struct file *filp);
extern pid_t f_getown(struct file *filp);
-extern int send_sigurg(struct fown_struct *fown);
+extern int send_sigurg(struct file *file);
/*
* sb->s_flags. Note that these mirror the equivalent MS_* flags where
@@ -1190,6 +1229,7 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_I_TS_EXPIRY_WARNED 0x00000400 /* warned about timestamp range expiry */
#define SB_I_RETIRED 0x00000800 /* superblock shouldn't be reused */
#define SB_I_NOUMASK 0x00001000 /* VFS does not apply umask */
+#define SB_I_NOIDMAP 0x00002000 /* No idmapped mounts on this superblock */
/* Possible states of 'frozen' field */
enum {
@@ -1268,7 +1308,7 @@ struct super_block {
time64_t s_time_min;
time64_t s_time_max;
#ifdef CONFIG_FSNOTIFY
- __u32 s_fsnotify_mask;
+ u32 s_fsnotify_mask;
struct fsnotify_sb_info *s_fsnotify_info;
#endif
@@ -1684,7 +1724,7 @@ static inline bool __sb_start_write_trylock(struct super_block *sb, int level)
#define __sb_writers_acquired(sb, lev) \
percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
#define __sb_writers_release(sb, lev) \
- percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
+ percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], _THIS_IP_)
/**
* __sb_write_started - check if sb freeze level is held
@@ -2074,6 +2114,8 @@ struct file_operations {
#define FOP_DIO_PARALLEL_WRITE ((__force fop_flags_t)(1 << 3))
/* Contains huge pages */
#define FOP_HUGE_PAGES ((__force fop_flags_t)(1 << 4))
+/* Treat loff_t as unsigned (e.g., /dev/mem) */
+#define FOP_UNSIGNED_OFFSET ((__force fop_flags_t)(1 << 5))
/* Wrap a directory iterator that needs exclusive inode access */
int wrap_directory_iterator(struct file *, struct dir_context *,
@@ -2373,8 +2415,6 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
*
 * I_REFERENCED Marks the inode as recently referenced on the LRU list.
*
- * I_DIO_WAKEUP Never set. Only used as a key for wait_on_bit().
- *
* I_WB_SWITCH Cgroup bdi_writeback switching in progress. Used to
* synchronize competing switching instances and to tell
* wb stat updates to grab the i_pages lock. See
@@ -2397,30 +2437,32 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
* i_count.
*
* Q: What is the difference between I_WILL_FREE and I_FREEING?
+ *
+ * __I_{SYNC,NEW,LRU_ISOLATING} are used to derive unique addresses to wait
+ * upon. There's one free address left.
*/
-#define I_DIRTY_SYNC (1 << 0)
-#define I_DIRTY_DATASYNC (1 << 1)
-#define I_DIRTY_PAGES (1 << 2)
-#define __I_NEW 3
+#define __I_NEW 0
#define I_NEW (1 << __I_NEW)
-#define I_WILL_FREE (1 << 4)
-#define I_FREEING (1 << 5)
-#define I_CLEAR (1 << 6)
-#define __I_SYNC 7
+#define __I_SYNC 1
#define I_SYNC (1 << __I_SYNC)
-#define I_REFERENCED (1 << 8)
-#define __I_DIO_WAKEUP 9
-#define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP)
+#define __I_LRU_ISOLATING 2
+#define I_LRU_ISOLATING (1 << __I_LRU_ISOLATING)
+
+#define I_DIRTY_SYNC (1 << 3)
+#define I_DIRTY_DATASYNC (1 << 4)
+#define I_DIRTY_PAGES (1 << 5)
+#define I_WILL_FREE (1 << 6)
+#define I_FREEING (1 << 7)
+#define I_CLEAR (1 << 8)
+#define I_REFERENCED (1 << 9)
#define I_LINKABLE (1 << 10)
#define I_DIRTY_TIME (1 << 11)
-#define I_WB_SWITCH (1 << 13)
-#define I_OVL_INUSE (1 << 14)
-#define I_CREATING (1 << 15)
-#define I_DONTCACHE (1 << 16)
-#define I_SYNC_QUEUED (1 << 17)
-#define I_PINNING_NETFS_WB (1 << 18)
-#define __I_LRU_ISOLATING 19
-#define I_LRU_ISOLATING (1 << __I_LRU_ISOLATING)
+#define I_WB_SWITCH (1 << 12)
+#define I_OVL_INUSE (1 << 13)
+#define I_CREATING (1 << 14)
+#define I_DONTCACHE (1 << 15)
+#define I_SYNC_QUEUED (1 << 16)
+#define I_PINNING_NETFS_WB (1 << 17)
#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
@@ -2554,10 +2596,17 @@ struct super_block *sget(struct file_system_type *type,
struct super_block *sget_dev(struct fs_context *fc, dev_t dev);
/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
-#define fops_get(fops) \
- (((fops) && try_module_get((fops)->owner) ? (fops) : NULL))
-#define fops_put(fops) \
- do { if (fops) module_put((fops)->owner); } while(0)
+#define fops_get(fops) ({ \
+ const struct file_operations *_fops = (fops); \
+ (((_fops) && try_module_get((_fops)->owner) ? (_fops) : NULL)); \
+})
+
+#define fops_put(fops) ({ \
+ const struct file_operations *_fops = (fops); \
+ if (_fops) \
+ module_put((_fops)->owner); \
+})
+
/*
* This one is to be used *ONLY* from ->open() instances.
* fops must be non-NULL, pinned down *and* module dependencies
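The rework only makes the macros single-evaluation; the calling pattern in an open path stays the same. A brief sketch, where my_open() and lookup_fops() are hypothetical:

static int my_open(struct inode *inode, struct file *filp)
{
        const struct file_operations *fops = fops_get(lookup_fops(inode));
        int ret = -ENODEV;

        if (!fops)
                return ret;             /* module gone or nothing registered */
        if (fops->open)
                ret = fops->open(inode, filp);
        fops_put(fops);                 /* still NULL-safe, now evaluated once */
        return ret;
}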
@@ -3033,7 +3082,12 @@ extern loff_t default_llseek(struct file *file, loff_t offset, int whence);
extern loff_t vfs_llseek(struct file *file, loff_t offset, int whence);
-extern int inode_init_always(struct super_block *, struct inode *);
+extern int inode_init_always_gfp(struct super_block *, struct inode *, gfp_t);
+static inline int inode_init_always(struct super_block *sb, struct inode *inode)
+{
+ return inode_init_always_gfp(sb, inode, GFP_NOFS);
+}
+
extern void inode_init_once(struct inode *);
extern void address_space_init_once(struct address_space *mapping);
extern struct inode * igrab(struct inode *);
@@ -3100,7 +3154,14 @@ static inline bool is_zero_ino(ino_t ino)
return (u32)ino == 0;
}
-extern void __iget(struct inode * inode);
+/*
+ * inode->i_lock must be held
+ */
+static inline void __iget(struct inode *inode)
+{
+ atomic_inc(&inode->i_count);
+}
+
extern void iget_failed(struct inode *);
extern void clear_inode(struct inode *);
extern void __destroy_inode(struct inode *);
@@ -3178,11 +3239,12 @@ extern ssize_t iter_file_splice_write(struct pipe_inode_info *,
extern void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
extern loff_t noop_llseek(struct file *file, loff_t offset, int whence);
-#define no_llseek NULL
extern loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize);
extern loff_t generic_file_llseek(struct file *file, loff_t offset, int whence);
extern loff_t generic_file_llseek_size(struct file *file, loff_t offset,
int whence, loff_t maxsize, loff_t eof);
+loff_t generic_llseek_cookie(struct file *file, loff_t offset, int whence,
+ u64 *cookie);
extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
int whence, loff_t size);
extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t);
@@ -3220,7 +3282,9 @@ static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
}
#endif
+bool inode_dio_finished(const struct inode *inode);
void inode_dio_wait(struct inode *inode);
+void inode_dio_wait_interruptible(struct inode *inode);
/**
* inode_dio_begin - signal start of a direct I/O requests
@@ -3244,7 +3308,7 @@ static inline void inode_dio_begin(struct inode *inode)
static inline void inode_dio_end(struct inode *inode)
{
if (atomic_dec_and_test(&inode->i_dio_count))
- wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
+ wake_up_var(&inode->i_dio_count);
}
extern void inode_set_flags(struct inode *inode, unsigned int flags,
@@ -3337,7 +3401,7 @@ extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
extern int simple_empty(struct dentry *);
extern int simple_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata);
+ struct folio **foliop, void **fsdata);
extern const struct address_space_operations ram_aops;
extern int always_delete_dentry(const struct dentry *);
extern struct inode *alloc_anon_inode(struct super_block *);
@@ -3461,7 +3525,6 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags,
if (flags & RWF_NOWAIT) {
if (!(ki->ki_filp->f_mode & FMODE_NOWAIT))
return -EOPNOTSUPP;
- kiocb_flags |= IOCB_NOIO;
}
if (flags & RWF_ATOMIC) {
if (rw_type != WRITE)
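[Editor's note] The fops_get()/fops_put() rework near the top of this fs.h hunk turns both macros into statement expressions that evaluate their argument exactly once. A minimal usage sketch follows; the demo_* names are hypothetical and the context is an ordinary sleepable kernel path.

#include <linux/fs.h>
#include <linux/module.h>

/* Take a module reference on a candidate fops, use it, then drop it. */
static int demo_try_fops(const struct file_operations *candidate)
{
	const struct file_operations *fops = fops_get(candidate);

	if (!fops)
		return -ENODEV;		/* owning module is going away */

	/* ... the module behind fops is pinned here ... */

	fops_put(fops);
	return 0;
}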
diff --git a/include/linux/fsl/enetc_mdio.h b/include/linux/fsl/enetc_mdio.h
index df25fffdc0ae..623ccfcbf39c 100644
--- a/include/linux/fsl/enetc_mdio.h
+++ b/include/linux/fsl/enetc_mdio.h
@@ -59,7 +59,8 @@ static inline int enetc_mdio_read_c45(struct mii_bus *bus, int phy_id,
static inline int enetc_mdio_write_c45(struct mii_bus *bus, int phy_id,
int devad, int regnum, u16 value)
{ return -EINVAL; }
-struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
+static inline struct enetc_hw *enetc_hw_alloc(struct device *dev,
+ void __iomem *port_regs)
{ return ERR_PTR(-EINVAL); }
#endif
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index 083c860fd28e..c90ec889bfc2 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -436,7 +436,7 @@ void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
u16 if_id);
-extern struct bus_type fsl_mc_bus_type;
+extern const struct bus_type fsl_mc_bus_type;
extern struct device_type fsl_mc_bus_dprc_type;
extern struct device_type fsl_mc_bus_dpni_type;
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 8be029bc50b1..3ecf7768e577 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -217,7 +217,6 @@ struct fsnotify_group {
#define FSNOTIFY_GROUP_USER 0x01 /* user allocated group */
#define FSNOTIFY_GROUP_DUPS 0x02 /* allow multiple marks per object */
-#define FSNOTIFY_GROUP_NOFS 0x04 /* group lock is not direct reclaim safe */
int flags;
unsigned int owner_flags; /* stored flags of mark_mutex owner */
@@ -268,22 +267,19 @@ struct fsnotify_group {
static inline void fsnotify_group_lock(struct fsnotify_group *group)
{
mutex_lock(&group->mark_mutex);
- if (group->flags & FSNOTIFY_GROUP_NOFS)
- group->owner_flags = memalloc_nofs_save();
+ group->owner_flags = memalloc_nofs_save();
}
static inline void fsnotify_group_unlock(struct fsnotify_group *group)
{
- if (group->flags & FSNOTIFY_GROUP_NOFS)
- memalloc_nofs_restore(group->owner_flags);
+ memalloc_nofs_restore(group->owner_flags);
mutex_unlock(&group->mark_mutex);
}
static inline void fsnotify_group_assert_locked(struct fsnotify_group *group)
{
WARN_ON_ONCE(!mutex_is_locked(&group->mark_mutex));
- if (group->flags & FSNOTIFY_GROUP_NOFS)
- WARN_ON_ONCE(!(current->flags & PF_MEMALLOC_NOFS));
+ WARN_ON_ONCE(!(current->flags & PF_MEMALLOC_NOFS));
}
/* When calling fsnotify tell it if the data is a path or inode */
diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h
index f3512fddf3d7..5b51c3d582d6 100644
--- a/include/linux/generic-radix-tree.h
+++ b/include/linux/generic-radix-tree.h
@@ -41,6 +41,7 @@
#include <linux/limits.h>
#include <linux/log2.h>
#include <linux/math.h>
+#include <linux/slab.h>
#include <linux/types.h>
struct genradix_root;
@@ -48,10 +49,63 @@ struct genradix_root;
#define GENRADIX_NODE_SHIFT 9
#define GENRADIX_NODE_SIZE (1U << GENRADIX_NODE_SHIFT)
+#define GENRADIX_ARY (GENRADIX_NODE_SIZE / sizeof(struct genradix_node *))
+#define GENRADIX_ARY_SHIFT ilog2(GENRADIX_ARY)
+
+/* depth that's needed for a genradix that can address up to ULONG_MAX: */
+#define GENRADIX_MAX_DEPTH \
+ DIV_ROUND_UP(BITS_PER_LONG - GENRADIX_NODE_SHIFT, GENRADIX_ARY_SHIFT)
+
+#define GENRADIX_DEPTH_MASK \
+ ((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1))
+
+static inline int genradix_depth_shift(unsigned depth)
+{
+ return GENRADIX_NODE_SHIFT + GENRADIX_ARY_SHIFT * depth;
+}
+
+/*
+ * Returns size (of data, in bytes) that a tree of a given depth holds:
+ */
+static inline size_t genradix_depth_size(unsigned depth)
+{
+ return 1UL << genradix_depth_shift(depth);
+}
+
+static inline unsigned genradix_root_to_depth(struct genradix_root *r)
+{
+ return (unsigned long) r & GENRADIX_DEPTH_MASK;
+}
+
+static inline struct genradix_node *genradix_root_to_node(struct genradix_root *r)
+{
+ return (void *) ((unsigned long) r & ~GENRADIX_DEPTH_MASK);
+}
+
struct __genradix {
struct genradix_root *root;
};
+struct genradix_node {
+ union {
+ /* Interior node: */
+ struct genradix_node *children[GENRADIX_ARY];
+
+ /* Leaf: */
+ u8 data[GENRADIX_NODE_SIZE];
+ };
+};
+
+static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
+{
+ return kzalloc(GENRADIX_NODE_SIZE, gfp_mask);
+}
+
+static inline void genradix_free_node(struct genradix_node *node)
+{
+ kfree(node);
+}
+
/*
* NOTE: currently, sizeof(_type) must not be larger than GENRADIX_NODE_SIZE:
*/
@@ -128,6 +182,30 @@ static inline size_t __idx_to_offset(size_t idx, size_t obj_size)
#define __genradix_idx_to_offset(_radix, _idx) \
__idx_to_offset(_idx, __genradix_obj_size(_radix))
+static inline void *__genradix_ptr_inlined(struct __genradix *radix, size_t offset)
+{
+ struct genradix_root *r = READ_ONCE(radix->root);
+ struct genradix_node *n = genradix_root_to_node(r);
+ unsigned level = genradix_root_to_depth(r);
+ unsigned shift = genradix_depth_shift(level);
+
+ if (unlikely(ilog2(offset) >= genradix_depth_shift(level)))
+ return NULL;
+
+ while (n && shift > GENRADIX_NODE_SHIFT) {
+ shift -= GENRADIX_ARY_SHIFT;
+ n = n->children[offset >> shift];
+ offset &= (1UL << shift) - 1;
+ }
+
+ return n ? &n->data[offset] : NULL;
+}
+
+#define genradix_ptr_inlined(_radix, _idx) \
+ (__genradix_cast(_radix) \
+ __genradix_ptr_inlined(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx)))
+
void *__genradix_ptr(struct __genradix *, size_t);
/**
@@ -142,7 +220,24 @@ void *__genradix_ptr(struct __genradix *, size_t);
__genradix_ptr(&(_radix)->tree, \
__genradix_idx_to_offset(_radix, _idx)))
-void *__genradix_ptr_alloc(struct __genradix *, size_t, gfp_t);
+void *__genradix_ptr_alloc(struct __genradix *, size_t,
+ struct genradix_node **, gfp_t);
+
+#define genradix_ptr_alloc_inlined(_radix, _idx, _gfp) \
+ (__genradix_cast(_radix) \
+ (__genradix_ptr_inlined(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx)) ?: \
+ __genradix_ptr_alloc(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx), \
+ NULL, _gfp)))
+
+#define genradix_ptr_alloc_preallocated_inlined(_radix, _idx, _new_node, _gfp)\
+ (__genradix_cast(_radix) \
+ (__genradix_ptr_inlined(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx)) ?: \
+ __genradix_ptr_alloc(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx), \
+ _new_node, _gfp)))
/**
* genradix_ptr_alloc - get a pointer to a genradix entry, allocating it
@@ -157,7 +252,13 @@ void *__genradix_ptr_alloc(struct __genradix *, size_t, gfp_t);
(__genradix_cast(_radix) \
__genradix_ptr_alloc(&(_radix)->tree, \
__genradix_idx_to_offset(_radix, _idx), \
- _gfp))
+ NULL, _gfp))
+
+#define genradix_ptr_alloc_preallocated(_radix, _idx, _new_node, _gfp)\
+ (__genradix_cast(_radix) \
+ __genradix_ptr_alloc(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx), \
+ _new_node, _gfp))
struct genradix_iter {
size_t offset;
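[Editor's note] The generic-radix-tree changes above move the node layout and a fully inlined lookup path into the header, alongside new preallocation hooks (genradix_alloc_node() and the *_preallocated() allocation macros). A minimal sketch of the header-level API, assuming a kernel-module context; demo_* names are hypothetical.

#include <linux/generic-radix-tree.h>

struct demo_entry {
	u64 key;
};

/* Static storage is zero-initialized, which is equivalent to genradix_init(). */
static GENRADIX(struct demo_entry) demo_radix;

static int demo_set(size_t idx, u64 key)
{
	struct demo_entry *e = genradix_ptr_alloc(&demo_radix, idx, GFP_KERNEL);

	if (!e)
		return -ENOMEM;
	e->key = key;
	return 0;
}

static u64 demo_get(size_t idx)
{
	/* Inlined fast-path lookup; returns NULL if idx was never allocated. */
	struct demo_entry *e = genradix_ptr_inlined(&demo_radix, idx);

	return e ? e->key : 0;
}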
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index f53f76e0b17e..a951de920e20 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -319,7 +319,7 @@ static inline struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order
}
static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
{
- return __folio_alloc_node(gfp, order, numa_node_id());
+ return __folio_alloc_node_noprof(gfp, order, numa_node_id());
}
static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
struct mempolicy *mpol, pgoff_t ilx, int nid)
@@ -446,4 +446,27 @@ extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_
#endif
void free_contig_range(unsigned long pfn, unsigned long nr_pages);
+#ifdef CONFIG_CONTIG_ALLOC
+static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
+ int nid, nodemask_t *node)
+{
+ struct page *page;
+
+ if (WARN_ON(!order || !(gfp & __GFP_COMP)))
+ return NULL;
+
+ page = alloc_contig_pages_noprof(1 << order, gfp, nid, node);
+
+ return page ? page_folio(page) : NULL;
+}
+#else
+static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
+ int nid, nodemask_t *node)
+{
+ return NULL;
+}
+#endif
+/* This should be paired with folio_put() rather than free_contig_range(). */
+#define folio_alloc_gigantic(...) alloc_hooks(folio_alloc_gigantic_noprof(__VA_ARGS__))
+
#endif /* __LINUX_GFP_H */
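[Editor's note] A sketch of the new gigantic-folio helper under the constraints shown in the hunk above: the order must be non-zero, __GFP_COMP is mandatory, and the result is released with folio_put() rather than free_contig_range(). The demo_* names are hypothetical.

#include <linux/gfp.h>
#include <linux/mm.h>

static struct folio *demo_grab_gigantic_folio(int order, int nid)
{
	gfp_t gfp = GFP_KERNEL | __GFP_COMP | __GFP_NOWARN;

	/* Returns NULL on failure, or always when CONFIG_CONTIG_ALLOC is disabled. */
	return folio_alloc_gigantic(order, gfp, nid, NULL);
}

static void demo_drop_gigantic_folio(struct folio *folio)
{
	folio_put(folio);	/* not free_contig_range() */
}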
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
index 313be4ad79fd..65db9349f905 100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@ -215,7 +215,8 @@ enum {
* the caller still has to check for failures) while costly requests try to be
* not disruptive and back off even without invoking the OOM killer.
* The following three modifiers might be used to override some of these
- * implicit rules.
+ * implicit rules. Please note that all of them must be used along with
+ * the %__GFP_DIRECT_RECLAIM flag.
*
* %__GFP_NORETRY: The VM implementation will try only very lightweight
* memory direct reclaim to get some memory under memory pressure (thus
@@ -246,11 +247,14 @@ enum {
* cannot handle allocation failures. The allocation could block
* indefinitely but will never return with failure. Testing for
* failure is pointless.
+ * It _must_ be blockable and used together with __GFP_DIRECT_RECLAIM.
+ * It should _never_ be used in non-sleepable contexts.
* New users should be evaluated carefully (and the flag should be
* used only when there is no reasonable failure policy) but it is
* definitely preferable to use the flag rather than opencode endless
* loop around allocator.
- * Using this flag for costly allocations is _highly_ discouraged.
+ * Allocating pages from the buddy with __GFP_NOFAIL and order > 1 is
+ * not supported. Please consider using kvmalloc() instead.
*/
#define __GFP_IO ((__force gfp_t)___GFP_IO)
#define __GFP_FS ((__force gfp_t)___GFP_FS)
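[Editor's note] A short illustration of the tightened __GFP_NOFAIL rule documented above, assuming a sleepable kernel context.

#include <linux/gfp.h>
#include <linux/slab.h>

static void *demo_must_succeed(size_t size)
{
	/*
	 * Valid: GFP_KERNEL includes __GFP_DIRECT_RECLAIM, so __GFP_NOFAIL
	 * may be combined with it (and only for small-order requests).
	 *
	 * Invalid per the documentation above (never do this):
	 *	kmalloc(size, GFP_ATOMIC | __GFP_NOFAIL);
	 */
	return kmalloc(size, GFP_KERNEL | __GFP_NOFAIL);
}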
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 063f71b18a7c..2d105be7bbc3 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -17,15 +17,9 @@
struct device;
/* make these flag values available regardless of GPIO kconfig options */
-#define GPIOF_DIR_OUT (0 << 0)
-#define GPIOF_DIR_IN (1 << 0)
-
-#define GPIOF_INIT_LOW (0 << 1)
-#define GPIOF_INIT_HIGH (1 << 1)
-
-#define GPIOF_IN (GPIOF_DIR_IN)
-#define GPIOF_OUT_INIT_LOW (GPIOF_DIR_OUT | GPIOF_INIT_LOW)
-#define GPIOF_OUT_INIT_HIGH (GPIOF_DIR_OUT | GPIOF_INIT_HIGH)
+#define GPIOF_IN ((1 << 0))
+#define GPIOF_OUT_INIT_LOW ((0 << 0) | (0 << 1))
+#define GPIOF_OUT_INIT_HIGH ((0 << 0) | (1 << 1))
/* Gpio pin is active-low */
#define GPIOF_ACTIVE_LOW (1 << 2)
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index 3bb87bf6bc65..455f855bc084 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -59,6 +59,15 @@ enum hdmi_infoframe_type {
#define HDMI_DRM_INFOFRAME_SIZE 26
#define HDMI_VENDOR_INFOFRAME_SIZE 4
+/*
+ * HDMI 1.3a table 5-14 states that the largest InfoFrame_length is 27,
+ * not including the packet header or checksum byte. We include the
+ * checksum byte in HDMI_INFOFRAME_HEADER_SIZE, so this should allow
+ * HDMI_INFOFRAME_SIZE(MAX) to be the largest buffer we could ever need
+ * for any HDMI infoframe.
+ */
+#define HDMI_MAX_INFOFRAME_SIZE 27
+
#define HDMI_INFOFRAME_SIZE(type) \
(HDMI_INFOFRAME_HEADER_SIZE + HDMI_ ## type ## _INFOFRAME_SIZE)
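[Editor's note] With HDMI_MAX_INFOFRAME_SIZE in place, HDMI_INFOFRAME_SIZE(MAX) can size a worst-case pack buffer. A sketch assuming an already-populated AVI infoframe; the stack buffer is used purely to illustrate the sizing.

#include <linux/hdmi.h>

static ssize_t demo_pack_avi(struct hdmi_avi_infoframe *frame)
{
	u8 buf[HDMI_INFOFRAME_SIZE(MAX)];	/* large enough for any infoframe */

	return hdmi_avi_infoframe_pack(frame, buf, sizeof(buf));
}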
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 1533c9dcd3a6..121d5b8bc867 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -46,7 +46,7 @@ struct hid_item {
__s16 s16;
__u32 u32;
__s32 s32;
- __u8 *longdata;
+ const __u8 *longdata;
} data;
};
@@ -600,9 +600,9 @@ struct hid_driver;
struct hid_ll_driver;
struct hid_device { /* device report descriptor */
- __u8 *dev_rdesc;
+ const __u8 *dev_rdesc;
unsigned dev_rsize;
- __u8 *rdesc;
+ const __u8 *rdesc;
unsigned rsize;
struct hid_collection *collection; /* List of HID collections */
unsigned collection_size; /* Number of allocated hid_collections */
@@ -822,7 +822,7 @@ struct hid_driver {
struct hid_usage *usage, __s32 value);
void (*report)(struct hid_device *hdev, struct hid_report *report);
- __u8 *(*report_fixup)(struct hid_device *hdev, __u8 *buf,
+ const __u8 *(*report_fixup)(struct hid_device *hdev, __u8 *buf,
unsigned int *size);
int (*input_mapping)(struct hid_device *hdev,
@@ -940,6 +940,8 @@ extern void hidinput_report_event(struct hid_device *hid, struct hid_report *rep
extern int hidinput_connect(struct hid_device *hid, unsigned int force);
extern void hidinput_disconnect(struct hid_device *);
+struct hid_field *hid_find_field(struct hid_device *hdev, unsigned int report_type,
+ unsigned int application, unsigned int usage);
int hid_set_field(struct hid_field *, unsigned, __s32);
int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
int interrupt);
@@ -953,7 +955,7 @@ struct hid_device *hid_allocate_device(void);
struct hid_report *hid_register_report(struct hid_device *device,
enum hid_report_type type, unsigned int id,
unsigned int application);
-int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
+int hid_parse_report(struct hid_device *hid, const __u8 *start, unsigned size);
struct hid_report *hid_validate_values(struct hid_device *hid,
enum hid_report_type type, unsigned int id,
unsigned int field_index,
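[Editor's note] A sketch of a driver-side ->report_fixup() under the new const-returning prototype from the hid.h hunk above: the incoming buffer is still writable, so in-place patching remains possible. The driver name and the patched byte are hypothetical.

#include <linux/hid.h>

static const __u8 *demo_report_fixup(struct hid_device *hdev, __u8 *rdesc,
				     unsigned int *rsize)
{
	/* Illustrative in-place tweak of a single descriptor byte. */
	if (*rsize >= 4 && rdesc[3] == 0x05)
		rdesc[3] = 0x06;

	return rdesc;
}

static struct hid_driver demo_hid_driver = {
	.name		= "demo-hid",
	.report_fixup	= demo_report_fixup,
	/* .id_table and the remaining callbacks omitted for brevity */
};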
diff --git a/include/linux/hid_bpf.h b/include/linux/hid_bpf.h
index d4d063cf63b5..6a47223e6460 100644
--- a/include/linux/hid_bpf.h
+++ b/include/linux/hid_bpf.h
@@ -212,7 +212,7 @@ int hid_bpf_connect_device(struct hid_device *hdev);
void hid_bpf_disconnect_device(struct hid_device *hdev);
void hid_bpf_destroy_device(struct hid_device *hid);
int hid_bpf_device_init(struct hid_device *hid);
-u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size);
+u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, const u8 *rdesc, unsigned int *size);
#else /* CONFIG_HID_BPF */
static inline u8 *dispatch_hid_bpf_device_event(struct hid_device *hid, enum hid_report_type type,
u8 *data, u32 *size, int interrupt,
diff --git a/include/linux/hidraw.h b/include/linux/hidraw.h
index cd67f4ca5599..18fd30a288de 100644
--- a/include/linux/hidraw.h
+++ b/include/linux/hidraw.h
@@ -32,6 +32,7 @@ struct hidraw_list {
struct hidraw *hidraw;
struct list_head node;
struct mutex read_mutex;
+ bool revoked;
};
#ifdef CONFIG_HIDRAW
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index e25d9ebfdf89..67d0ab3c3bba 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -76,9 +76,9 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
/*
* Mask of all large folio orders supported for file THP. Folios in a DAX
* file is never split and the MAX_PAGECACHE_ORDER limit does not apply to
- * it.
+ * it. The same applies to PFNMAPs, where there is neither a page* nor a pagecache.
*/
-#define THP_ORDERS_ALL_FILE_DAX \
+#define THP_ORDERS_ALL_SPECIAL \
(BIT(PMD_ORDER) | BIT(PUD_ORDER))
#define THP_ORDERS_ALL_FILE_DEFAULT \
((BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0))
@@ -87,7 +87,7 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
* Mask of all large folio orders supported for THP.
*/
#define THP_ORDERS_ALL \
- (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DAX | THP_ORDERS_ALL_FILE_DEFAULT)
+ (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_SPECIAL | THP_ORDERS_ALL_FILE_DEFAULT)
#define TVA_SMAPS (1 << 0) /* Will be used for procfs */
#define TVA_IN_PF (1 << 1) /* Page fault handler */
@@ -96,6 +96,8 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
#define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \
(!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order)))
+#define split_folio(f) split_folio_to_list(f, NULL)
+
#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PUD_SHIFT PUD_SHIFT
@@ -114,6 +116,53 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
#define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1))
#define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT)
+enum mthp_stat_item {
+ MTHP_STAT_ANON_FAULT_ALLOC,
+ MTHP_STAT_ANON_FAULT_FALLBACK,
+ MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
+ MTHP_STAT_SWPOUT,
+ MTHP_STAT_SWPOUT_FALLBACK,
+ MTHP_STAT_SHMEM_ALLOC,
+ MTHP_STAT_SHMEM_FALLBACK,
+ MTHP_STAT_SHMEM_FALLBACK_CHARGE,
+ MTHP_STAT_SPLIT,
+ MTHP_STAT_SPLIT_FAILED,
+ MTHP_STAT_SPLIT_DEFERRED,
+ MTHP_STAT_NR_ANON,
+ MTHP_STAT_NR_ANON_PARTIALLY_MAPPED,
+ __MTHP_STAT_COUNT
+};
+
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
+struct mthp_stat {
+ unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
+};
+
+DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
+
+static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
+{
+ if (order <= 0 || order > PMD_ORDER)
+ return;
+
+ this_cpu_add(mthp_stats.stats[order][item], delta);
+}
+
+static inline void count_mthp_stat(int order, enum mthp_stat_item item)
+{
+ mod_mthp_stat(order, item, 1);
+}
+
+#else
+static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
+{
+}
+
+static inline void count_mthp_stat(int order, enum mthp_stat_item item)
+{
+}
+#endif
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern unsigned long transparent_hugepage_flags;
@@ -269,41 +318,6 @@ struct thpsize {
#define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)
-enum mthp_stat_item {
- MTHP_STAT_ANON_FAULT_ALLOC,
- MTHP_STAT_ANON_FAULT_FALLBACK,
- MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
- MTHP_STAT_SWPOUT,
- MTHP_STAT_SWPOUT_FALLBACK,
- MTHP_STAT_SHMEM_ALLOC,
- MTHP_STAT_SHMEM_FALLBACK,
- MTHP_STAT_SHMEM_FALLBACK_CHARGE,
- MTHP_STAT_SPLIT,
- MTHP_STAT_SPLIT_FAILED,
- MTHP_STAT_SPLIT_DEFERRED,
- __MTHP_STAT_COUNT
-};
-
-struct mthp_stat {
- unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
-};
-
-#ifdef CONFIG_SYSFS
-DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
-
-static inline void count_mthp_stat(int order, enum mthp_stat_item item)
-{
- if (order <= 0 || order > PMD_ORDER)
- return;
-
- this_cpu_inc(mthp_stats.stats[order][item]);
-}
-#else
-static inline void count_mthp_stat(int order, enum mthp_stat_item item)
-{
-}
-#endif
-
#define transparent_hugepage_use_zero_page() \
(transparent_hugepage_flags & \
(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
@@ -314,14 +328,29 @@ unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long add
unsigned long len, unsigned long pgoff, unsigned long flags,
vm_flags_t vm_flags);
-bool can_split_folio(struct folio *folio, int *pextra_pins);
+bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
unsigned int new_order);
+int min_order_for_split(struct folio *folio);
+int split_folio_to_list(struct folio *folio, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
- return split_huge_page_to_list_to_order(page, NULL, 0);
+ struct folio *folio = page_folio(page);
+ int ret = min_order_for_split(folio);
+
+ if (ret < 0)
+ return ret;
+
+ /*
+ * split_huge_page() locks the page before splitting and
+ * expects the same page that has been split to be locked when
+ * returned. split_folio(page_folio(page)) cannot be used here
+ * because it converts the page to folio and passes the head
+ * page to be split.
+ */
+ return split_huge_page_to_list_to_order(page, NULL, ret);
}
-void deferred_split_folio(struct folio *folio);
+void deferred_split_folio(struct folio *folio, bool partially_mapped);
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long address, bool freeze, struct folio *folio);
@@ -342,6 +371,17 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
unsigned long address);
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pud_t *pudp, unsigned long addr, pgprot_t newprot,
+ unsigned long cp_flags);
+#else
+static inline int
+change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pud_t *pudp, unsigned long addr, pgprot_t newprot,
+ unsigned long cp_flags) { return 0; }
+#endif
+
#define split_huge_pud(__vma, __pud, __address) \
do { \
pud_t *____pud = (__pud); \
@@ -410,11 +450,6 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
}
-static inline bool is_huge_zero_pud(pud_t pud)
-{
- return false;
-}
-
struct folio *mm_get_huge_zero_folio(struct mm_struct *mm);
void mm_put_huge_zero_folio(struct mm_struct *mm);
@@ -470,7 +505,7 @@ thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
}
static inline bool
-can_split_folio(struct folio *folio, int *pextra_pins)
+can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
{
return false;
}
@@ -484,7 +519,13 @@ static inline int split_huge_page(struct page *page)
{
return 0;
}
-static inline void deferred_split_folio(struct folio *folio) {}
+
+static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
+{
+ return 0;
+}
+
+static inline void deferred_split_folio(struct folio *folio, bool partially_mapped) {}
#define split_huge_pmd(__vma, __pmd, __address) \
do { } while (0)
@@ -555,11 +596,6 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
return false;
}
-static inline bool is_huge_zero_pud(pud_t pud)
-{
- return false;
-}
-
static inline void mm_put_huge_zero_folio(struct mm_struct *mm)
{
return;
@@ -585,6 +621,19 @@ static inline int next_order(unsigned long *orders, int prev)
{
return 0;
}
+
+static inline void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
+ unsigned long address)
+{
+}
+
+static inline int change_huge_pud(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, pud_t *pudp,
+ unsigned long addr, pgprot_t newprot,
+ unsigned long cp_flags)
+{
+ return 0;
+}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int split_folio_to_list_to_order(struct folio *folio,
@@ -598,7 +647,4 @@ static inline int split_folio_to_order(struct folio *folio, int new_order)
return split_folio_to_list_to_order(folio, NULL, new_order);
}
-#define split_folio_to_list(f, l) split_folio_to_list_to_order(f, l, 0)
-#define split_folio(f) split_folio_to_order(f, 0)
-
#endif /* _LINUX_HUGE_MM_H */
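[Editor's note] A sketch of how mm code is expected to use the relocated counters above: count_mthp_stat() for one-shot events and the new mod_mthp_stat() for population counters such as MTHP_STAT_NR_ANON. The demo_* name is hypothetical.

#include <linux/huge_mm.h>
#include <linux/mm.h>

static void demo_account_new_anon_folio(struct folio *folio)
{
	int order = folio_order(folio);

	count_mthp_stat(order, MTHP_STAT_ANON_FAULT_ALLOC);
	mod_mthp_stat(order, MTHP_STAT_NR_ANON, 1);	/* dropped with -1 on free */
}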
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 45bf05ad5c53..e4697539b665 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -127,9 +127,6 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
struct vm_area_struct *, struct vm_area_struct *);
-struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
- unsigned long address, unsigned int flags,
- unsigned int *page_mask);
void unmap_hugepage_range(struct vm_area_struct *,
unsigned long, unsigned long, struct page *,
zap_flags_t);
@@ -695,6 +692,9 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask,
bool allow_alloc_fallback);
+struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
+ nodemask_t *nmask, gfp_t gfp_mask);
+
int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
@@ -899,10 +899,11 @@ static inline bool hugepage_movable_supported(struct hstate *h)
/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
- if (hugepage_movable_supported(h))
- return GFP_HIGHUSER_MOVABLE;
- else
- return GFP_HIGHUSER;
+ gfp_t gfp = __GFP_COMP | __GFP_NOWARN;
+
+ gfp |= hugepage_movable_supported(h) ? GFP_HIGHUSER_MOVABLE : GFP_HIGHUSER;
+
+ return gfp;
}
static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
@@ -1062,6 +1063,13 @@ static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
}
static inline struct folio *
+alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
+ nodemask_t *nmask, gfp_t gfp_mask)
+{
+ return NULL;
+}
+
+static inline struct folio *
alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask,
bool allow_alloc_fallback)
@@ -1251,7 +1259,7 @@ static inline __init void hugetlb_cma_reserve(int order)
}
#endif
-#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
return page_count(virt_to_page(pte)) > 1;
@@ -1287,8 +1295,7 @@ bool __vma_private_lock(struct vm_area_struct *vma);
static inline pte_t *
hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
{
-#if defined(CONFIG_HUGETLB_PAGE) && \
- defined(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && defined(CONFIG_LOCKDEP)
+#if defined(CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING) && defined(CONFIG_LOCKDEP)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
/*
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index e94314760aab..5c6a421ad580 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -481,7 +481,6 @@ devm_hwmon_device_register_with_info(struct device *dev,
const struct attribute_group **extra_groups);
void hwmon_device_unregister(struct device *dev);
-void devm_hwmon_device_unregister(struct device *dev);
int hwmon_notify_event(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel);
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 377def497298..388ce71a29a9 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -761,6 +761,9 @@ struct i2c_adapter {
struct regulator *bus_regulator;
struct dentry *debugfs;
+
+ /* 7bit address space */
+ DECLARE_BITMAP(addrs_in_instantiation, 1 << 7);
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
index 074f632868d9..2a1ed05d5782 100644
--- a/include/linux/i3c/master.h
+++ b/include/linux/i3c/master.h
@@ -278,6 +278,20 @@ enum i3c_bus_mode {
};
/**
+ * enum i3c_open_drain_speed - I3C open-drain speed
+ * @I3C_OPEN_DRAIN_SLOW_SPEED: Slow open-drain speed for sending the first
+ * broadcast address. The first broadcast address at this speed
+ * will be visible to all devices on the I3C bus. I3C devices
+ * working in I2C mode will turn off their spike filter when
+ * switching into I3C mode.
+ * @I3C_OPEN_DRAIN_NORMAL_SPEED: Normal open-drain speed in I3C bus mode.
+ */
+enum i3c_open_drain_speed {
+ I3C_OPEN_DRAIN_SLOW_SPEED,
+ I3C_OPEN_DRAIN_NORMAL_SPEED,
+};
+
+/**
* enum i3c_addr_slot_status - I3C address slot status
* @I3C_ADDR_SLOT_FREE: address is free
* @I3C_ADDR_SLOT_RSVD: address is reserved
@@ -436,6 +450,7 @@ struct i3c_bus {
* NULL.
* @enable_hotjoin: enable hot join event detect.
* @disable_hotjoin: disable hot join event detect.
+ * @set_speed: adjust I3C open drain mode timing.
*/
struct i3c_master_controller_ops {
int (*bus_init)(struct i3c_master_controller *master);
@@ -464,6 +479,7 @@ struct i3c_master_controller_ops {
struct i3c_ibi_slot *slot);
int (*enable_hotjoin)(struct i3c_master_controller *master);
int (*disable_hotjoin)(struct i3c_master_controller *master);
+ int (*set_speed)(struct i3c_master_controller *master, enum i3c_open_drain_speed speed);
};
/**
diff --git a/include/linux/i8253.h b/include/linux/i8253.h
index 8336b2f6f834..56c280eb2d4f 100644
--- a/include/linux/i8253.h
+++ b/include/linux/i8253.h
@@ -21,9 +21,9 @@
#define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ)
extern raw_spinlock_t i8253_lock;
-extern bool i8253_clear_counter_on_shutdown;
extern struct clock_event_device i8253_clockevent;
extern void clockevent_i8253_init(bool oneshot);
+extern void clockevent_i8253_disable(void);
extern void setup_pit_timer(void);
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 30cef3b940eb..456bca45ff05 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -20,7 +20,7 @@
#include <linux/etherdevice.h>
#include <linux/bitfield.h>
#include <asm/byteorder.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
/*
* DS bit usage
diff --git a/include/linux/iio/backend.h b/include/linux/iio/backend.h
index 8099759d7242..37d56914d485 100644
--- a/include/linux/iio/backend.h
+++ b/include/linux/iio/backend.h
@@ -3,6 +3,7 @@
#define _IIO_BACKEND_H_
#include <linux/types.h>
+#include <linux/iio/iio.h>
struct iio_chan_spec;
struct fwnode_handle;
@@ -17,11 +18,13 @@ enum iio_backend_data_type {
};
enum iio_backend_data_source {
- IIO_BACKEND_INTERNAL_CONTINUOS_WAVE,
+ IIO_BACKEND_INTERNAL_CONTINUOUS_WAVE,
IIO_BACKEND_EXTERNAL,
IIO_BACKEND_DATA_SOURCE_MAX
};
+#define iio_backend_debugfs_ptr(ptr) PTR_IF(IS_ENABLED(CONFIG_DEBUG_FS), ptr)
+
/**
* IIO_BACKEND_EX_INFO - Helper for an IIO extended channel attribute
* @_name: Attribute name
@@ -54,6 +57,8 @@ enum iio_backend_test_pattern {
IIO_BACKEND_NO_TEST_PATTERN,
/* modified prbs9 */
IIO_BACKEND_ADI_PRBS_9A = 32,
+ /* modified prbs23 */
+ IIO_BACKEND_ADI_PRBS_23A,
IIO_BACKEND_TEST_PATTERN_MAX
};
@@ -81,6 +86,9 @@ enum iio_backend_sample_trigger {
* @extend_chan_spec: Extend an IIO channel.
* @ext_info_set: Extended info setter.
* @ext_info_get: Extended info getter.
+ * @read_raw: Read a channel attribute from a backend device
+ * @debugfs_print_chan_status: Print channel status into a buffer.
+ * @debugfs_reg_access: Read or write register value of backend.
**/
struct iio_backend_ops {
int (*enable)(struct iio_backend *back);
@@ -113,11 +121,31 @@ struct iio_backend_ops {
const char *buf, size_t len);
int (*ext_info_get)(struct iio_backend *back, uintptr_t private,
const struct iio_chan_spec *chan, char *buf);
+ int (*read_raw)(struct iio_backend *back,
+ struct iio_chan_spec const *chan, int *val, int *val2,
+ long mask);
+ int (*debugfs_print_chan_status)(struct iio_backend *back,
+ unsigned int chan, char *buf,
+ size_t len);
+ int (*debugfs_reg_access)(struct iio_backend *back, unsigned int reg,
+ unsigned int writeval, unsigned int *readval);
+};
+
+/**
+ * struct iio_backend_info - info structure for an iio_backend
+ * @name: Backend name.
+ * @ops: Backend operations.
+ */
+struct iio_backend_info {
+ const char *name;
+ const struct iio_backend_ops *ops;
};
int iio_backend_chan_enable(struct iio_backend *back, unsigned int chan);
int iio_backend_chan_disable(struct iio_backend *back, unsigned int chan);
int devm_iio_backend_enable(struct device *dev, struct iio_backend *back);
+int iio_backend_enable(struct iio_backend *back);
+void iio_backend_disable(struct iio_backend *back);
int iio_backend_data_format_set(struct iio_backend *back, unsigned int chan,
const struct iio_backend_data_fmt *data);
int iio_backend_data_source_set(struct iio_backend *back, unsigned int chan,
@@ -141,17 +169,41 @@ ssize_t iio_backend_ext_info_set(struct iio_dev *indio_dev, uintptr_t private,
const char *buf, size_t len);
ssize_t iio_backend_ext_info_get(struct iio_dev *indio_dev, uintptr_t private,
const struct iio_chan_spec *chan, char *buf);
-
-int iio_backend_extend_chan_spec(struct iio_dev *indio_dev,
- struct iio_backend *back,
+int iio_backend_read_raw(struct iio_backend *back,
+ struct iio_chan_spec const *chan, int *val, int *val2,
+ long mask);
+int iio_backend_extend_chan_spec(struct iio_backend *back,
struct iio_chan_spec *chan);
void *iio_backend_get_priv(const struct iio_backend *conv);
struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name);
+struct iio_backend *devm_iio_backend_fwnode_get(struct device *dev,
+ const char *name,
+ struct fwnode_handle *fwnode);
struct iio_backend *
__devm_iio_backend_get_from_fwnode_lookup(struct device *dev,
struct fwnode_handle *fwnode);
int devm_iio_backend_register(struct device *dev,
- const struct iio_backend_ops *ops, void *priv);
+ const struct iio_backend_info *info, void *priv);
+
+static inline int iio_backend_read_scale(struct iio_backend *back,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2)
+{
+ return iio_backend_read_raw(back, chan, val, val2, IIO_CHAN_INFO_SCALE);
+}
+
+static inline int iio_backend_read_offset(struct iio_backend *back,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2)
+{
+ return iio_backend_read_raw(back, chan, val, val2,
+ IIO_CHAN_INFO_OFFSET);
+}
+ssize_t iio_backend_debugfs_print_chan_status(struct iio_backend *back,
+ unsigned int chan, char *buf,
+ size_t len);
+void iio_backend_debugfs_add(struct iio_backend *back,
+ struct iio_dev *indio_dev);
#endif
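[Editor's note] devm_iio_backend_register() now takes a struct iio_backend_info rather than bare ops, as shown in the hunk above. A registration sketch with hypothetical demo_* names:

#include <linux/iio/backend.h>
#include <linux/device.h>

static const struct iio_backend_ops demo_backend_ops = {
	/* .enable, .chan_enable, .data_format_set, ... */
};

static const struct iio_backend_info demo_backend_info = {
	.name	= "demo-backend",
	.ops	= &demo_backend_ops,
};

static int demo_backend_probe(struct device *dev, void *priv)
{
	return devm_iio_backend_register(dev, &demo_backend_info, priv);
}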
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 894309294182..18779b631e90 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -609,7 +609,7 @@ struct iio_dev {
int scan_bytes;
const unsigned long *available_scan_masks;
- unsigned masklength;
+ unsigned __private masklength;
const unsigned long *active_scan_mask;
bool scan_timestamp;
struct iio_trigger *trig;
@@ -810,6 +810,23 @@ static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
}
#endif
+/**
+ * iio_device_suspend_triggering() - suspend trigger attached to an iio_dev
+ * @indio_dev: iio_dev associated with the device that will have triggers suspended
+ *
+ * Return 0 if successful, negative otherwise
+ **/
+int iio_device_suspend_triggering(struct iio_dev *indio_dev);
+
+/**
+ * iio_device_resume_triggering() - resume trigger attached to an iio_dev
+ * that was previously suspended with iio_device_suspend_triggering()
+ * @indio_dev: iio_dev associated with the device that will have triggers resumed
+ *
+ * Return 0 if successful, negative otherwise
+ **/
+int iio_device_resume_triggering(struct iio_dev *indio_dev);
+
#ifdef CONFIG_ACPI
bool iio_read_acpi_mount_matrix(struct device *dev,
struct iio_mount_matrix *orientation,
@@ -855,6 +872,26 @@ static inline const struct iio_scan_type
return &chan->scan_type;
}
+/**
+ * iio_get_masklength - Get length of the channels mask
+ * @indio_dev: the IIO device to get the masklength for
+ */
+static inline unsigned int iio_get_masklength(const struct iio_dev *indio_dev)
+{
+ return ACCESS_PRIVATE(indio_dev, masklength);
+}
+
+int iio_active_scan_mask_index(struct iio_dev *indio_dev);
+
+/**
+ * iio_for_each_active_channel - Iterate over active channels
+ * @indio_dev: the IIO device
+ * @chan: Holds the index of the enabled channel
+ */
+#define iio_for_each_active_channel(indio_dev, chan) \
+ for_each_set_bit((chan), (indio_dev)->active_scan_mask, \
+ iio_get_masklength(indio_dev))
+
ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals);
int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
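[Editor's note] A sketch of a buffer fill path using the new iio_get_masklength()/iio_for_each_active_channel() helpers instead of touching the now-private masklength field directly; demo_read_channel is a hypothetical per-driver read callback.

#include <linux/iio/iio.h>

static int demo_fill_scan(struct iio_dev *indio_dev, u16 *scan,
			  int (*demo_read_channel)(struct iio_dev *, unsigned int, u16 *))
{
	unsigned int ch, i = 0;
	int ret;

	iio_for_each_active_channel(indio_dev, ch) {
		ret = demo_read_channel(indio_dev, ch, &scan[i++]);
		if (ret)
			return ret;
	}

	return 0;
}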
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h
index b8d8d69eba29..90867f44ab4d 100644
--- a/include/linux/input/matrix_keypad.h
+++ b/include/linux/input/matrix_keypad.h
@@ -34,52 +34,6 @@ struct matrix_keymap_data {
unsigned int keymap_size;
};
-/**
- * struct matrix_keypad_platform_data - platform-dependent keypad data
- * @keymap_data: pointer to &matrix_keymap_data
- * @row_gpios: pointer to array of gpio numbers representing rows
- * @col_gpios: pointer to array of gpio numbers reporesenting colums
- * @num_row_gpios: actual number of row gpios used by device
- * @num_col_gpios: actual number of col gpios used by device
- * @col_scan_delay_us: delay, measured in microseconds, that is
- * needed before we can keypad after activating column gpio
- * @debounce_ms: debounce interval in milliseconds
- * @clustered_irq: may be specified if interrupts of all row/column GPIOs
- * are bundled to one single irq
- * @clustered_irq_flags: flags that are needed for the clustered irq
- * @active_low: gpio polarity
- * @wakeup: controls whether the device should be set up as wakeup
- * source
- * @no_autorepeat: disable key autorepeat
- * @drive_inactive_cols: drive inactive columns during scan, rather than
- * making them inputs.
- *
- * This structure represents platform-specific data that use used by
- * matrix_keypad driver to perform proper initialization.
- */
-struct matrix_keypad_platform_data {
- const struct matrix_keymap_data *keymap_data;
-
- const unsigned int *row_gpios;
- const unsigned int *col_gpios;
-
- unsigned int num_row_gpios;
- unsigned int num_col_gpios;
-
- unsigned int col_scan_delay_us;
-
- /* key debounce interval in milli-second */
- unsigned int debounce_ms;
-
- unsigned int clustered_irq;
- unsigned int clustered_irq_flags;
-
- bool active_low;
- bool wakeup;
- bool no_autorepeat;
- bool drive_inactive_cols;
-};
-
int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data,
const char *keymap_name,
unsigned int rows, unsigned int cols,
@@ -88,6 +42,4 @@ int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data,
int matrix_keypad_parse_properties(struct device *dev,
unsigned int *rows, unsigned int *cols);
-#define matrix_keypad_parse_of_params matrix_keypad_parse_properties
-
#endif /* _MATRIX_KEYPAD_H */
diff --git a/include/linux/intel_vsec.h b/include/linux/intel_vsec.h
new file mode 100644
index 000000000000..11ee185566c3
--- /dev/null
+++ b/include/linux/intel_vsec.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _INTEL_VSEC_H
+#define _INTEL_VSEC_H
+
+#include <linux/auxiliary_bus.h>
+#include <linux/bits.h>
+
+#define VSEC_CAP_TELEMETRY BIT(0)
+#define VSEC_CAP_WATCHER BIT(1)
+#define VSEC_CAP_CRASHLOG BIT(2)
+#define VSEC_CAP_SDSI BIT(3)
+#define VSEC_CAP_TPMI BIT(4)
+
+/* Intel DVSEC offsets */
+#define INTEL_DVSEC_ENTRIES 0xA
+#define INTEL_DVSEC_SIZE 0xB
+#define INTEL_DVSEC_TABLE 0xC
+#define INTEL_DVSEC_TABLE_BAR(x) ((x) & GENMASK(2, 0))
+#define INTEL_DVSEC_TABLE_OFFSET(x) ((x) & GENMASK(31, 3))
+#define TABLE_OFFSET_SHIFT 3
+
+struct pci_dev;
+struct resource;
+
+enum intel_vsec_id {
+ VSEC_ID_TELEMETRY = 2,
+ VSEC_ID_WATCHER = 3,
+ VSEC_ID_CRASHLOG = 4,
+ VSEC_ID_SDSI = 65,
+ VSEC_ID_TPMI = 66,
+};
+
+/**
+ * struct intel_vsec_header - Common fields of Intel VSEC and DVSEC registers.
+ * @rev: Revision ID of the VSEC/DVSEC register space
+ * @length: Length of the VSEC/DVSEC register space
+ * @id: ID of the feature
+ * @num_entries: Number of instances of the feature
+ * @entry_size: Size of the discovery table for each feature
+ * @tbir: BAR containing the discovery tables
+ * @offset: BAR offset of start of the first discovery table
+ */
+struct intel_vsec_header {
+ u8 rev;
+ u16 length;
+ u16 id;
+ u8 num_entries;
+ u8 entry_size;
+ u8 tbir;
+ u32 offset;
+};
+
+enum intel_vsec_quirks {
+ /* Watcher feature not supported */
+ VSEC_QUIRK_NO_WATCHER = BIT(0),
+
+ /* Crashlog feature not supported */
+ VSEC_QUIRK_NO_CRASHLOG = BIT(1),
+
+ /* Use shift instead of mask to read discovery table offset */
+ VSEC_QUIRK_TABLE_SHIFT = BIT(2),
+
+ /* DVSEC not present (provided in driver data) */
+ VSEC_QUIRK_NO_DVSEC = BIT(3),
+
+ /* Platforms requiring quirk in the auxiliary driver */
+ VSEC_QUIRK_EARLY_HW = BIT(4),
+};
+
+/**
+ * struct pmt_callbacks - Callback infrastructure for PMT devices
+ * @read_telem: when specified, called by the client driver to access PMT data
+ *              (instead of a direct copy). Callback arguments:
+ * @pdev: PCI device reference for the callback's use
+ * @guid: ID of data to access
+ * @data: buffer for the data to be copied
+ * @count: size of buffer
+ */
+struct pmt_callbacks {
+ int (*read_telem)(struct pci_dev *pdev, u32 guid, u64 *data, u32 count);
+};
+
+/**
+ * struct intel_vsec_platform_info - Platform specific data
+ * @parent: parent device in the auxbus chain
+ * @headers: list of headers to define the PMT client devices to create
+ * @priv_data: private data, usable by parent devices, currently a callback
+ * @caps: bitmask of PMT capabilities for the given headers
+ * @quirks: bitmask of VSEC device quirks
+ * @base_addr: allow a base address to be specified (rather than derived)
+ */
+struct intel_vsec_platform_info {
+ struct device *parent;
+ struct intel_vsec_header **headers;
+ void *priv_data;
+ unsigned long caps;
+ unsigned long quirks;
+ u64 base_addr;
+};
+
+/**
+ * struct intel_vsec_device - Auxbus specific device information
+ * @auxdev: auxbus device struct for auxbus access
+ * @pcidev: pci device associated with the device
+ * @resource: any resources shared by the parent
+ * @ida: id reference
+ * @num_resources: number of resources
+ * @id: xarray id
+ * @priv_data: any private data needed
+ * @priv_data_size: size of the private data
+ * @quirks: specified quirks
+ * @base_addr: base address of entries (if specified)
+ */
+struct intel_vsec_device {
+ struct auxiliary_device auxdev;
+ struct pci_dev *pcidev;
+ struct resource *resource;
+ struct ida *ida;
+ int num_resources;
+ int id; /* xa */
+ void *priv_data;
+ size_t priv_data_size;
+ unsigned long quirks;
+ u64 base_addr;
+};
+
+int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent,
+ struct intel_vsec_device *intel_vsec_dev,
+ const char *name);
+
+static inline struct intel_vsec_device *dev_to_ivdev(struct device *dev)
+{
+ return container_of(dev, struct intel_vsec_device, auxdev.dev);
+}
+
+static inline struct intel_vsec_device *auxdev_to_ivdev(struct auxiliary_device *auxdev)
+{
+ return container_of(auxdev, struct intel_vsec_device, auxdev);
+}
+
+#if IS_ENABLED(CONFIG_INTEL_VSEC)
+void intel_vsec_register(struct pci_dev *pdev,
+ struct intel_vsec_platform_info *info);
+#else
+static inline void intel_vsec_register(struct pci_dev *pdev,
+ struct intel_vsec_platform_info *info)
+{
+}
+#endif
+#endif
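[Editor's note] A sketch of an auxiliary-driver probe retrieving the parent VSEC device via the helpers defined in the new header above; the driver-side names are hypothetical.

#include <linux/auxiliary_bus.h>
#include <linux/intel_vsec.h>
#include <linux/pci.h>

static int demo_aux_probe(struct auxiliary_device *auxdev,
			  const struct auxiliary_device_id *id)
{
	struct intel_vsec_device *ivdev = auxdev_to_ivdev(auxdev);

	dev_info(&auxdev->dev, "backed by PCI device %s, %d resource(s)\n",
		 pci_name(ivdev->pcidev), ivdev->num_resources);

	return 0;
}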
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 3f30c88e0b4c..457151f9f263 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -276,7 +276,7 @@ struct irq_affinity_notify {
#define IRQ_AFFINITY_MAX_SETS 4
/**
- * struct irq_affinity - Description for automatic irq affinity assignements
+ * struct irq_affinity - Description for automatic irq affinity assignments
* @pre_vectors: Don't apply affinity to @pre_vectors at beginning of
* the MSI(-X) vector space
* @post_vectors: Don't apply affinity to @post_vectors at end of
@@ -594,7 +594,7 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
struct softirq_action
{
- void (*action)(struct softirq_action *);
+ void (*action)(void);
};
asmlinkage void do_softirq(void);
@@ -609,7 +609,7 @@ static inline void do_softirq_post_smp_call_flush(unsigned int unused)
}
#endif
-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
+extern void open_softirq(int nr, void (*action)(void));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);
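[Editor's note] With the softirq action callback losing its unused struct softirq_action argument, handlers and open_softirq() calls take the shape sketched below. The handler body and the softirq slot are purely illustrative.

#include <linux/interrupt.h>

static void demo_softirq_action(void)
{
	/* process whatever was queued for this softirq */
}

static void demo_softirq_setup(void)
{
	/*
	 * A real user would own a dedicated entry in the softirq enum;
	 * TASKLET_SOFTIRQ is named here only so the call is concrete.
	 */
	open_softirq(TASKLET_SOFTIRQ, demo_softirq_action);
}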
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index f9a81761bfce..b1ecfc3cd5bc 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -171,6 +171,10 @@ struct io_pgtable_cfg {
u64 ttbr[4];
u32 n_ttbrs;
} apple_dart_cfg;
+
+ struct {
+ int nid;
+ } amd;
};
};
diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
index 447fbfd32215..c189d36ad55e 100644
--- a/include/linux/io_uring/cmd.h
+++ b/include/linux/io_uring/cmd.h
@@ -23,6 +23,15 @@ static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
return sqe->cmd;
}
+static inline void io_uring_cmd_private_sz_check(size_t cmd_sz)
+{
+ BUILD_BUG_ON(cmd_sz > sizeof_field(struct io_uring_cmd, pdu));
+}
+#define io_uring_cmd_to_pdu(cmd, pdu_type) ( \
+ io_uring_cmd_private_sz_check(sizeof(pdu_type)), \
+ ((pdu_type *)&(cmd)->pdu) \
+)
+
#if defined(CONFIG_IO_URING)
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
struct iov_iter *iter, void *ioucmd);
@@ -48,6 +57,9 @@ void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
unsigned int issue_flags);
+/* Execute the request from a blocking context */
+void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd);
+
#else
static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
struct iov_iter *iter, void *ioucmd)
@@ -67,6 +79,9 @@ static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
}
+static inline void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
+{
+}
#endif
/*
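[Editor's note] A sketch of the size-checked PDU accessor added above: a driver overlays its private per-command state on io_uring_cmd->pdu, and the BUILD_BUG_ON fires at compile time if the structure outgrows the pdu area. The demo_* names are hypothetical.

#include <linux/io_uring/cmd.h>

struct demo_cmd_pdu {
	u32	stage;
	u32	flags;
};

static int demo_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	struct demo_cmd_pdu *pdu = io_uring_cmd_to_pdu(ioucmd, struct demo_cmd_pdu);

	pdu->stage = 0;
	pdu->flags = issue_flags;

	return -EIOCBQUEUED;	/* completed later, e.g. via io_uring_cmd_done() */
}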
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 3315005df117..4b9ba523978d 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -239,6 +239,9 @@ struct io_ring_ctx {
struct io_rings *rings;
struct percpu_ref refs;
+ clockid_t clockid;
+ enum tk_offsets clock_offset;
+
enum task_work_notify_mode notify_method;
unsigned sq_thread_idle;
} ____cacheline_aligned_in_smp;
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 6fc1c858013d..4ad12a3c8bae 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -257,11 +257,7 @@ static inline const struct iomap *iomap_iter_srcmap(const struct iomap_iter *i)
}
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
- const struct iomap_ops *ops);
-int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
- struct iomap *iomap, loff_t pos, loff_t length, ssize_t written,
- int (*punch)(struct inode *inode, loff_t pos, loff_t length));
-
+ const struct iomap_ops *ops, void *private);
int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops);
void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
@@ -277,6 +273,13 @@ int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
const struct iomap_ops *ops);
vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf,
const struct iomap_ops *ops);
+
+typedef void (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length,
+ struct iomap *iomap);
+void iomap_file_buffered_write_punch_delalloc(struct inode *inode, loff_t pos,
+ loff_t length, ssize_t written, unsigned flag,
+ struct iomap *iomap, iomap_punch_t punch);
+
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len, const struct iomap_ops *ops);
loff_t iomap_seek_hole(struct inode *inode, loff_t offset,
diff --git a/include/linux/iommu-dma.h b/include/linux/iommu-dma.h
new file mode 100644
index 000000000000..508beaa44c39
--- /dev/null
+++ b/include/linux/iommu-dma.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ *
+ * DMA operations that map physical memory through IOMMU.
+ */
+#ifndef _LINUX_IOMMU_DMA_H
+#define _LINUX_IOMMU_DMA_H
+
+#include <linux/dma-direction.h>
+
+#ifdef CONFIG_IOMMU_DMA
+static inline bool use_dma_iommu(struct device *dev)
+{
+ return dev->dma_iommu;
+}
+#else
+static inline bool use_dma_iommu(struct device *dev)
+{
+ return false;
+}
+#endif /* CONFIG_IOMMU_DMA */
+
+dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs);
+void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs);
+void *iommu_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp, unsigned long attrs);
+int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+unsigned long iommu_dma_get_merge_boundary(struct device *dev);
+size_t iommu_dma_opt_mapping_size(void);
+size_t iommu_dma_max_mapping_size(struct device *dev);
+void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, unsigned long attrs);
+dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, size_t size,
+ enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
+void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt, enum dma_data_direction dir);
+void *iommu_dma_vmap_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt);
+#define iommu_dma_vunmap_noncontiguous(dev, vaddr) \
+ vunmap(vaddr);
+int iommu_dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
+ size_t size, struct sg_table *sgt);
+void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir);
+void iommu_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir);
+void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir);
+void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir);
+
+#endif /* _LINUX_IOMMU_DMA_H */
diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h
index ffc3a949f837..30f832a60ccb 100644
--- a/include/linux/iommufd.h
+++ b/include/linux/iommufd.h
@@ -6,17 +6,17 @@
#ifndef __LINUX_IOMMUFD_H
#define __LINUX_IOMMUFD_H
-#include <linux/types.h>
-#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/types.h>
struct device;
-struct iommufd_device;
-struct page;
-struct iommufd_ctx;
-struct iommufd_access;
struct file;
struct iommu_group;
+struct iommufd_access;
+struct iommufd_ctx;
+struct iommufd_device;
+struct page;
struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
struct device *dev, u32 *id);
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index db1249cd9692..b25377b6ea98 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -40,7 +40,7 @@ static inline int task_nice_ioclass(struct task_struct *task)
{
if (task->policy == SCHED_IDLE)
return IOPRIO_CLASS_IDLE;
- else if (task_is_realtime(task))
+ else if (rt_or_dl_task_policy(task))
return IOPRIO_CLASS_RT;
else
return IOPRIO_CLASS_BE;
diff --git a/include/linux/ioremap.h b/include/linux/ioremap.h
index f0e99fc7dd8b..2bd1661fe9ad 100644
--- a/include/linux/ioremap.h
+++ b/include/linux/ioremap.h
@@ -4,6 +4,7 @@
#include <linux/kasan.h>
#include <asm/pgtable.h>
+#include <asm/vmalloc.h>
#if defined(CONFIG_HAS_IOMEM) || defined(CONFIG_GENERIC_IOREMAP)
/*
diff --git a/include/linux/iov_iter.h b/include/linux/iov_iter.h
index 270454a6703d..c4aa58032faf 100644
--- a/include/linux/iov_iter.h
+++ b/include/linux/iov_iter.h
@@ -10,6 +10,7 @@
#include <linux/uio.h>
#include <linux/bvec.h>
+#include <linux/folio_queue.h>
typedef size_t (*iov_step_f)(void *iter_base, size_t progress, size_t len,
void *priv, void *priv2);
@@ -141,6 +142,60 @@ size_t iterate_bvec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
}
/*
+ * Handle ITER_FOLIOQ.
+ */
+static __always_inline
+size_t iterate_folioq(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ const struct folio_queue *folioq = iter->folioq;
+ unsigned int slot = iter->folioq_slot;
+ size_t progress = 0, skip = iter->iov_offset;
+
+ if (slot == folioq_nr_slots(folioq)) {
+ /* The iterator may have been extended. */
+ folioq = folioq->next;
+ slot = 0;
+ }
+
+ do {
+ struct folio *folio = folioq_folio(folioq, slot);
+ size_t part, remain, consumed;
+ size_t fsize;
+ void *base;
+
+ if (!folio)
+ break;
+
+ fsize = folioq_folio_size(folioq, slot);
+ base = kmap_local_folio(folio, skip);
+ part = umin(len, PAGE_SIZE - skip % PAGE_SIZE);
+ remain = step(base, progress, part, priv, priv2);
+ kunmap_local(base);
+ consumed = part - remain;
+ len -= consumed;
+ progress += consumed;
+ skip += consumed;
+ if (skip >= fsize) {
+ skip = 0;
+ slot++;
+ if (slot == folioq_nr_slots(folioq) && folioq->next) {
+ folioq = folioq->next;
+ slot = 0;
+ }
+ }
+ if (remain)
+ break;
+ } while (len);
+
+ iter->folioq_slot = slot;
+ iter->folioq = folioq;
+ iter->iov_offset = skip;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
* Handle ITER_XARRAY.
*/
static __always_inline
@@ -249,6 +304,8 @@ size_t iterate_and_advance2(struct iov_iter *iter, size_t len, void *priv,
return iterate_bvec(iter, len, priv, priv2, step);
if (iov_iter_is_kvec(iter))
return iterate_kvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_folioq(iter))
+ return iterate_folioq(iter, len, priv, priv2, step);
if (iov_iter_is_xarray(iter))
return iterate_xarray(iter, len, priv, priv2, step);
return iterate_discard(iter, len, priv, priv2, step);
@@ -271,4 +328,51 @@ size_t iterate_and_advance(struct iov_iter *iter, size_t len, void *priv,
return iterate_and_advance2(iter, len, priv, NULL, ustep, step);
}
+/**
+ * iterate_and_advance_kernel - Iterate over a kernel-internal iterator
+ * @iter: The iterator to iterate over.
+ * @len: The amount to iterate over.
+ * @priv: Data for the step functions.
+ * @priv2: More data for the step functions.
+ * @step: Function for other iterators; given kernel addresses.
+ *
+ * Iterate over the next part of an iterator, up to the specified length. The
+ * buffer is presented in segments, which for kernel iteration are broken up by
+ * physical pages and mapped, with the mapped address being presented.
+ *
+ * [!] Note This will only handle BVEC, KVEC, FOLIOQ, XARRAY and DISCARD-type
+ * iterators; it will not handle UBUF or IOVEC-type iterators.
+ *
+ * A step function, @step, must be provided; it is given mapped kernel
+ * addresses only, since this helper never deals with user addresses and no
+ * pinning is performed.
+ *
+ * The step function is passed the address and length of the segment, @priv,
+ * @priv2 and the amount of data so far iterated over (which can, for example,
+ * be added to @priv to point to the right part of a second buffer). The step
+ * function should return the amount of the segment it didn't process (ie. 0
+ * indicates complete processing).
+ *
+ * This function returns the amount of data processed (ie. 0 means nothing was
+ * processed and the value of @len means processed to completion).
+ */
+static __always_inline
+size_t iterate_and_advance_kernel(struct iov_iter *iter, size_t len, void *priv,
+ void *priv2, iov_step_f step)
+{
+ if (unlikely(iter->count < len))
+ len = iter->count;
+ if (unlikely(!len))
+ return 0;
+ if (iov_iter_is_bvec(iter))
+ return iterate_bvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_kvec(iter))
+ return iterate_kvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_folioq(iter))
+ return iterate_folioq(iter, len, priv, priv2, step);
+ if (iov_iter_is_xarray(iter))
+ return iterate_xarray(iter, len, priv, priv2, step);
+ return iterate_discard(iter, len, priv, priv2, step);
+}
+
#endif /* _LINUX_IOV_ITER_H */
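[Editor's note] A sketch of a step function for the new iterate_and_advance_kernel() helper: it receives each mapped segment and copies it into a destination buffer passed through @priv. The demo_* names are hypothetical.

#include <linux/iov_iter.h>
#include <linux/string.h>

static size_t demo_copy_step(void *base, size_t progress, size_t len,
			     void *priv, void *priv2)
{
	memcpy((u8 *)priv + progress, base, len);
	return 0;	/* nothing left unprocessed in this segment */
}

static size_t demo_copy_from_kernel_iter(void *dst, size_t len,
					 struct iov_iter *iter)
{
	return iterate_and_advance_kernel(iter, len, dst, NULL, demo_copy_step);
}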
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 1f5dbf1f92c9..fa711f80957b 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -991,7 +991,6 @@ void irq_init_desc(unsigned int irq);
* @ack: Ack register offset to reg_base
* @eoi: Eoi register offset to reg_base
* @type: Type configuration register offset to reg_base
- * @polarity: Polarity configuration register offset to reg_base
*/
struct irq_chip_regs {
unsigned long enable;
@@ -1000,7 +999,6 @@ struct irq_chip_regs {
unsigned long ack;
unsigned long eoi;
unsigned long type;
- unsigned long polarity;
};
/**
@@ -1040,8 +1038,6 @@ struct irq_chip_type {
* @irq_base: Interrupt base nr for this chip
* @irq_cnt: Number of interrupts handled by this chip
* @mask_cache: Cached mask register shared between all chip types
- * @type_cache: Cached type register
- * @polarity_cache: Cached polarity register
* @wake_enabled: Interrupt can wakeup from suspend
* @wake_active: Interrupt is marked as an wakeup from suspend source
* @num_ct: Number of available irq_chip_type instances (usually 1)
@@ -1068,8 +1064,6 @@ struct irq_chip_generic {
unsigned int irq_base;
unsigned int irq_cnt;
u32 mask_cache;
- u32 type_cache;
- u32 polarity_cache;
u32 wake_enabled;
u32 wake_active;
unsigned int num_ct;
diff --git a/include/linux/irqchip/riscv-imsic.h b/include/linux/irqchip/riscv-imsic.h
index faf0b800b1b0..7494952c5518 100644
--- a/include/linux/irqchip/riscv-imsic.h
+++ b/include/linux/irqchip/riscv-imsic.h
@@ -8,6 +8,8 @@
#include <linux/types.h>
#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/fwnode.h>
#include <asm/csr.h>
#define IMSIC_MMIO_PAGE_SHIFT 12
@@ -84,4 +86,11 @@ static inline const struct imsic_global_config *imsic_get_global_config(void)
#endif
+#ifdef CONFIG_ACPI
+int imsic_platform_acpi_probe(struct fwnode_handle *fwnode);
+struct fwnode_handle *imsic_acpi_get_fwnode(struct device *dev);
+#else
+static inline struct fwnode_handle *imsic_acpi_get_fwnode(struct device *dev) { return NULL; }
+#endif
+
#endif
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index de6105f68fec..e432b6a12a32 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -291,7 +291,12 @@ struct irq_domain_chip_generic_info;
* @hwirq_max: Maximum number of interrupts supported by controller
* @direct_max: Maximum value of direct maps;
* Use ~0 for no limit; 0 for no direct mapping
+ * @hwirq_base: The first hardware interrupt number (legacy domains only)
+ * @virq_base: The first Linux interrupt number for legacy domains to
+ * immediately associate the interrupts after domain creation
* @bus_token: Domain bus token
+ * @name_suffix: Optional name suffix to avoid collisions when multiple
+ * domains are added using same fwnode
* @ops: Domain operation callbacks
* @host_data: Controller private data pointer
 * @dgc_info: Generic chip information structure pointer used to
@@ -307,7 +312,10 @@ struct irq_domain_info {
unsigned int size;
irq_hw_number_t hwirq_max;
int direct_max;
+ unsigned int hwirq_base;
+ unsigned int virq_base;
enum irq_domain_bus_token bus_token;
+ const char *name_suffix;
const struct irq_domain_ops *ops;
void *host_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 5157d92b6f23..8aef9bb6ad57 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1086,7 +1086,7 @@ struct journal_s
int j_revoke_records_per_block;
/**
- * @j_transaction_overhead:
+ * @j_transaction_overhead_buffers:
*
* Number of blocks each transaction needs for its own bookkeeping
*/
@@ -1675,7 +1675,7 @@ int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out);
int jbd2_submit_inode_data(journal_t *journal, struct jbd2_inode *jinode);
int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode);
int jbd2_fc_wait_bufs(journal_t *journal, int num_blks);
-int jbd2_fc_release_bufs(journal_t *journal);
+void jbd2_fc_release_bufs(journal_t *journal);
/*
* is_journal_abort
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index d9f1435a5a13..1220f0fbe5bf 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -418,7 +418,7 @@ extern unsigned long preset_lpj;
#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
TICK_NSEC -1) / (u64)TICK_NSEC))
/*
- * The maximum jiffie value is (MAX_INT >> 1). Here we translate that
+ * The maximum jiffy value is (MAX_INT >> 1). Here we translate that
* into seconds. The 64-bit case will overflow if we are not careful,
* so use the messy SH_DIV macro to do it. Still all constants.
*/
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 70d6a8f6e25d..00a3bf7c0d8f 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -175,13 +175,59 @@ static __always_inline void * __must_check kasan_init_slab_obj(
return (void *)object;
}
-bool __kasan_slab_free(struct kmem_cache *s, void *object,
- unsigned long ip, bool init);
+bool __kasan_slab_pre_free(struct kmem_cache *s, void *object,
+ unsigned long ip);
+/**
+ * kasan_slab_pre_free - Check whether freeing a slab object is safe.
+ * @object: Object to be freed.
+ *
+ * This function checks whether freeing the given object is safe. It may
+ * check for double-free and invalid-free bugs and report them.
+ *
+ * This function is intended only for use by the slab allocator.
+ *
+ * Return: true if freeing the object is unsafe; false otherwise.
+ */
+static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s,
+ void *object)
+{
+ if (kasan_enabled())
+ return __kasan_slab_pre_free(s, object, _RET_IP_);
+ return false;
+}
+
+bool __kasan_slab_free(struct kmem_cache *s, void *object, bool init,
+ bool still_accessible);
+/**
+ * kasan_slab_free - Poison, initialize, and quarantine a slab object.
+ * @object: Object to be freed.
+ * @init: Whether to initialize the object.
+ * @still_accessible: Whether the object contents are still accessible.
+ *
+ * This function informs KASAN that a slab object has been freed and is not
+ * supposed to be accessed anymore, except when @still_accessible is set
+ * (indicating that the object is in a SLAB_TYPESAFE_BY_RCU cache and an RCU
+ * grace period might not have passed yet).
+ *
+ * For KASAN modes that have integrated memory initialization
+ * (kasan_has_integrated_init() == true), this function also initializes
+ * the object's memory. For other modes, the @init argument is ignored.
+ *
+ * This function might also take ownership of the object to quarantine it.
+ * When this happens, KASAN will defer freeing the object to a later
+ * stage and handle it internally until then. The return value indicates
+ * whether KASAN took ownership of the object.
+ *
+ * This function is intended only for use by the slab allocator.
+ *
+ * Return: true if KASAN took ownership of the object; false otherwise.
+ */
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
- void *object, bool init)
+ void *object, bool init,
+ bool still_accessible)
{
if (kasan_enabled())
- return __kasan_slab_free(s, object, _RET_IP_, init);
+ return __kasan_slab_free(s, object, init, still_accessible);
return false;
}
@@ -371,7 +417,14 @@ static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
{
return (void *)object;
}
-static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
+
+static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
+{
+ return false;
+}
+
+static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
+ bool init, bool still_accessible)
{
return false;
}
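A rough sketch of the call sequence the two hooks are designed for in an allocator free path (simplified; slab_want_init_on_free() here stands in for the allocator's internal init-on-free decision):

static bool example_free_checks(struct kmem_cache *s, void *object)
{
	/* Abort the free if KASAN detects a double-free or invalid-free. */
	if (kasan_slab_pre_free(s, object))
		return false;

	/*
	 * Poison the object. If KASAN takes ownership (quarantine), the
	 * object must not be returned to the freelist yet.
	 */
	if (kasan_slab_free(s, object, slab_want_init_on_free(s), false))
		return false;

	return true;	/* safe to put the object back on the freelist */
}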
diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h
index 859f4b0c1b2b..196778a087c4 100644
--- a/include/linux/kernel-page-flags.h
+++ b/include/linux/kernel-page-flags.h
@@ -10,12 +10,11 @@
*/
#define KPF_RESERVED 32
#define KPF_MLOCKED 33
-#define KPF_MAPPEDTODISK 34
+#define KPF_OWNER_2 34
#define KPF_PRIVATE 35
#define KPF_PRIVATE_2 36
#define KPF_OWNER_PRIVATE 37
#define KPF_ARCH 38
-#define KPF_UNCACHED 39
#define KPF_SOFTDIRTY 40
#define KPF_ARCH_2 41
#define KPF_ARCH_3 42
diff --git a/include/linux/key.h b/include/linux/key.h
index 943a432da3ae..074dca3222b9 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -436,9 +436,6 @@ extern key_ref_t keyring_search(key_ref_t keyring,
const char *description,
bool recurse);
-extern int keyring_add_key(struct key *keyring,
- struct key *key);
-
extern int keyring_restrict(key_ref_t keyring, const char *type,
const char *restriction);
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index 88100cc9caba..0ad1ddbb8b99 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -124,7 +124,7 @@ static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp
if (!static_branch_likely(&kfence_allocation_key))
return NULL;
#endif
- if (likely(atomic_read(&kfence_allocation_gate)))
+ if (likely(atomic_read(&kfence_allocation_gate) > 0))
return NULL;
return __kfence_alloc(s, size, flags);
}
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index f68865e19b0b..30baae91b225 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -4,6 +4,7 @@
#include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */
+extern unsigned int khugepaged_max_ptes_none __read_mostly;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern struct attribute_group khugepaged_attr_group;
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
index 906521c2329c..6055fc969877 100644
--- a/include/linux/kmsg_dump.h
+++ b/include/linux/kmsg_dump.h
@@ -40,6 +40,17 @@ struct kmsg_dump_iter {
};
/**
+ * struct kmsg_dump_detail - kernel crash detail
+ * @reason: reason for the crash, see kmsg_dump_reason.
+ * @description: optional short string, to provide additional information.
+ */
+
+struct kmsg_dump_detail {
+ enum kmsg_dump_reason reason;
+ const char *description;
+};
+
+/**
* struct kmsg_dumper - kernel crash message dumper structure
* @list: Entry in the dumper list (private)
* @dump: Call into dumping code which will retrieve the data with
@@ -49,13 +60,13 @@ struct kmsg_dump_iter {
*/
struct kmsg_dumper {
struct list_head list;
- void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason);
+ void (*dump)(struct kmsg_dumper *dumper, struct kmsg_dump_detail *detail);
enum kmsg_dump_reason max_reason;
bool registered;
};
#ifdef CONFIG_PRINTK
-void kmsg_dump(enum kmsg_dump_reason reason);
+void kmsg_dump_desc(enum kmsg_dump_reason reason, const char *desc);
bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
char *line, size_t size, size_t *len);
@@ -71,7 +82,7 @@ int kmsg_dump_unregister(struct kmsg_dumper *dumper);
const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason);
#else
-static inline void kmsg_dump(enum kmsg_dump_reason reason)
+static inline void kmsg_dump_desc(enum kmsg_dump_reason reason, const char *desc)
{
}
@@ -107,4 +118,9 @@ static inline const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason)
}
#endif
+static inline void kmsg_dump(enum kmsg_dump_reason reason)
+{
+ kmsg_dump_desc(reason, NULL);
+}
+
#endif /* _LINUX_KMSG_DUMP_H */
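A minimal sketch of a dumper written against the new detail-based callback (the example names are illustrative; registration still goes through kmsg_dump_register()):

static void example_dump(struct kmsg_dumper *dumper,
			 struct kmsg_dump_detail *detail)
{
	pr_info("kmsg dump: reason=%s desc=%s\n",
		kmsg_dump_reason_str(detail->reason),
		detail->description ? detail->description : "none");
}

static struct kmsg_dumper example_dumper = {
	.dump       = example_dump,
	.max_reason = KMSG_DUMP_OOPS,
};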
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 5fcbc254d186..8c4f3bb24429 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -269,15 +269,6 @@ extern unsigned long __stop_kprobe_blacklist[];
extern struct kretprobe_blackpoint kretprobe_blacklist[];
-#ifdef CONFIG_KPROBES_SANITY_TEST
-extern int init_test_probes(void);
-#else /* !CONFIG_KPROBES_SANITY_TEST */
-static inline int init_test_probes(void)
-{
- return 0;
-}
-#endif /* CONFIG_KPROBES_SANITY_TEST */
-
extern int arch_prepare_kprobe(struct kprobe *p);
extern void arch_arm_kprobe(struct kprobe *p);
extern void arch_disarm_kprobe(struct kprobe *p);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0d5125a3e31a..db567d26f7b9 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1529,8 +1529,22 @@ static inline void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) {}
#endif
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
-int kvm_arch_hardware_enable(void);
-void kvm_arch_hardware_disable(void);
+/*
+ * kvm_arch_{enable,disable}_virtualization() are called on one CPU, under
+ * kvm_usage_lock, immediately after/before 0=>1 and 1=>0 transitions of
+ * kvm_usage_count, i.e. at the beginning of the generic hardware enabling
+ * sequence, and at the end of the generic hardware disabling sequence.
+ */
+void kvm_arch_enable_virtualization(void);
+void kvm_arch_disable_virtualization(void);
+/*
+ * kvm_arch_{enable,disable}_virtualization_cpu() are called on "every" CPU to
+ * do the actual twiddling of hardware bits. The hooks are called on all
+ * online CPUs when KVM enables/disables virtualization, and on a single CPU
+ * when that CPU is onlined/offlined (including for Resume/Suspend).
+ */
+int kvm_arch_enable_virtualization_cpu(void);
+void kvm_arch_disable_virtualization_cpu(void);
#endif
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
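For illustration, no-op architecture stubs matching the new hook split might look as follows (a sketch only; real implementations poke hardware control registers):

void kvm_arch_enable_virtualization(void)
{
	/* Runs once, under kvm_usage_lock, on the 0=>1 usage transition. */
}

void kvm_arch_disable_virtualization(void)
{
	/* Runs once, under kvm_usage_lock, on the 1=>0 usage transition. */
}

int kvm_arch_enable_virtualization_cpu(void)
{
	/* Per-CPU hardware enable; return a negative errno on failure. */
	return 0;
}

void kvm_arch_disable_virtualization_cpu(void)
{
	/* Per-CPU hardware disable; mirrors the enable hook. */
}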
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 6885603f211b..e5968c3ed4ae 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -611,6 +611,8 @@ enum led_trigger_netdev_modes {
TRIGGER_NETDEV_FULL_DUPLEX,
TRIGGER_NETDEV_TX,
TRIGGER_NETDEV_RX,
+ TRIGGER_NETDEV_TX_ERR,
+ TRIGGER_NETDEV_RX_ERR,
/* Keep last */
__TRIGGER_NETDEV_MAX,
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 17394098bee9..9b4a6ff03235 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -55,6 +55,46 @@
/* defines only for the constants which don't work well as enums */
#define ATA_TAG_POISON 0xfafbfcfdU
+/*
+ * Quirk flags bits.
+ * ata_device->quirks is an unsigned int, so __ATA_QUIRK_MAX must not exceed 32.
+ */
+enum ata_quirks {
+ __ATA_QUIRK_DIAGNOSTIC, /* Failed boot diag */
+ __ATA_QUIRK_NODMA, /* DMA problems */
+ __ATA_QUIRK_NONCQ, /* Don't use NCQ */
+ __ATA_QUIRK_MAX_SEC_128, /* Limit max sects to 128 */
+ __ATA_QUIRK_BROKEN_HPA, /* Broken HPA */
+ __ATA_QUIRK_DISABLE, /* Disable it */
+ __ATA_QUIRK_HPA_SIZE, /* Native size off by one */
+ __ATA_QUIRK_IVB, /* cbl det validity bit bugs */
+ __ATA_QUIRK_STUCK_ERR, /* Stuck ERR on next PACKET */
+ __ATA_QUIRK_BRIDGE_OK, /* No bridge limits */
+ __ATA_QUIRK_ATAPI_MOD16_DMA, /* Use ATAPI DMA for commands that */
+ /* are not a multiple of 16 bytes */
+ __ATA_QUIRK_FIRMWARE_WARN, /* Firmware update warning */
+ __ATA_QUIRK_1_5_GBPS, /* Force 1.5 Gbps */
+ __ATA_QUIRK_NOSETXFER, /* Skip SETXFER, SATA only */
+ __ATA_QUIRK_BROKEN_FPDMA_AA, /* Skip AA */
+ __ATA_QUIRK_DUMP_ID, /* Dump IDENTIFY data */
+ __ATA_QUIRK_MAX_SEC_LBA48, /* Set max sects to 65535 */
+ __ATA_QUIRK_ATAPI_DMADIR, /* Device requires dmadir */
+ __ATA_QUIRK_NO_NCQ_TRIM, /* Do not use queued TRIM */
+ __ATA_QUIRK_NOLPM, /* Do not use LPM */
+ __ATA_QUIRK_WD_BROKEN_LPM, /* Some WDs have broken LPM */
+ __ATA_QUIRK_ZERO_AFTER_TRIM, /* Guarantees zero after trim */
+ __ATA_QUIRK_NO_DMA_LOG, /* Do not use DMA for log read */
+ __ATA_QUIRK_NOTRIM, /* Do not use TRIM */
+ __ATA_QUIRK_MAX_SEC_1024, /* Limit max sects to 1024 */
+ __ATA_QUIRK_MAX_TRIM_128M, /* Limit max trim size to 128M */
+ __ATA_QUIRK_NO_NCQ_ON_ATI, /* Disable NCQ on ATI chipset */
+ __ATA_QUIRK_NO_ID_DEV_LOG, /* Identify device log missing */
+ __ATA_QUIRK_NO_LOG_DIR, /* Do not read log directory */
+ __ATA_QUIRK_NO_FUA, /* Do not use FUA */
+
+ __ATA_QUIRK_MAX,
+};
+
enum {
/* various global constants */
LIBATA_MAX_PRD = ATA_MAX_PRD / 2,
@@ -338,6 +378,7 @@ enum {
ATA_EHI_PRINTINFO = (1 << 18), /* print configuration info */
ATA_EHI_SETMODE = (1 << 19), /* configure transfer mode */
ATA_EHI_POST_SETMODE = (1 << 20), /* revalidating after setmode */
+ ATA_EHI_DID_PRINT_QUIRKS = (1 << 21), /* already printed quirks info */
ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET,
@@ -362,43 +403,42 @@ enum {
*/
ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 8,
- /* Horkage types. May be set by libata or controller on drives
- (some horkage may be drive/controller pair dependent */
-
- ATA_HORKAGE_DIAGNOSTIC = (1 << 0), /* Failed boot diag */
- ATA_HORKAGE_NODMA = (1 << 1), /* DMA problems */
- ATA_HORKAGE_NONCQ = (1 << 2), /* Don't use NCQ */
- ATA_HORKAGE_MAX_SEC_128 = (1 << 3), /* Limit max sects to 128 */
- ATA_HORKAGE_BROKEN_HPA = (1 << 4), /* Broken HPA */
- ATA_HORKAGE_DISABLE = (1 << 5), /* Disable it */
- ATA_HORKAGE_HPA_SIZE = (1 << 6), /* native size off by one */
- ATA_HORKAGE_IVB = (1 << 8), /* cbl det validity bit bugs */
- ATA_HORKAGE_STUCK_ERR = (1 << 9), /* stuck ERR on next PACKET */
- ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */
- ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands
- not multiple of 16 bytes */
- ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */
- ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */
- ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */
- ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */
- ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */
- ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */
- ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */
- ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */
- ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */
- ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */
- ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
- ATA_HORKAGE_NO_DMA_LOG = (1 << 23), /* don't use DMA for log read */
- ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */
- ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */
- ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */
- ATA_HORKAGE_NO_NCQ_ON_ATI = (1 << 27), /* Disable NCQ on ATI chipset */
- ATA_HORKAGE_NO_ID_DEV_LOG = (1 << 28), /* Identify device log missing */
- ATA_HORKAGE_NO_LOG_DIR = (1 << 29), /* Do not read log directory */
- ATA_HORKAGE_NO_FUA = (1 << 30), /* Do not use FUA */
-
- /* DMA mask for user DMA control: User visible values; DO NOT
- renumber */
+ /*
+ * Quirk flags: may be set by libata or controller drivers on drives.
+ * Some quirks may be drive/controller pair dependent.
+ */
+ ATA_QUIRK_DIAGNOSTIC = (1U << __ATA_QUIRK_DIAGNOSTIC),
+ ATA_QUIRK_NODMA = (1U << __ATA_QUIRK_NODMA),
+ ATA_QUIRK_NONCQ = (1U << __ATA_QUIRK_NONCQ),
+ ATA_QUIRK_MAX_SEC_128 = (1U << __ATA_QUIRK_MAX_SEC_128),
+ ATA_QUIRK_BROKEN_HPA = (1U << __ATA_QUIRK_BROKEN_HPA),
+ ATA_QUIRK_DISABLE = (1U << __ATA_QUIRK_DISABLE),
+ ATA_QUIRK_HPA_SIZE = (1U << __ATA_QUIRK_HPA_SIZE),
+ ATA_QUIRK_IVB = (1U << __ATA_QUIRK_IVB),
+ ATA_QUIRK_STUCK_ERR = (1U << __ATA_QUIRK_STUCK_ERR),
+ ATA_QUIRK_BRIDGE_OK = (1U << __ATA_QUIRK_BRIDGE_OK),
+ ATA_QUIRK_ATAPI_MOD16_DMA = (1U << __ATA_QUIRK_ATAPI_MOD16_DMA),
+ ATA_QUIRK_FIRMWARE_WARN = (1U << __ATA_QUIRK_FIRMWARE_WARN),
+ ATA_QUIRK_1_5_GBPS = (1U << __ATA_QUIRK_1_5_GBPS),
+ ATA_QUIRK_NOSETXFER = (1U << __ATA_QUIRK_NOSETXFER),
+ ATA_QUIRK_BROKEN_FPDMA_AA = (1U << __ATA_QUIRK_BROKEN_FPDMA_AA),
+ ATA_QUIRK_DUMP_ID = (1U << __ATA_QUIRK_DUMP_ID),
+ ATA_QUIRK_MAX_SEC_LBA48 = (1U << __ATA_QUIRK_MAX_SEC_LBA48),
+ ATA_QUIRK_ATAPI_DMADIR = (1U << __ATA_QUIRK_ATAPI_DMADIR),
+ ATA_QUIRK_NO_NCQ_TRIM = (1U << __ATA_QUIRK_NO_NCQ_TRIM),
+ ATA_QUIRK_NOLPM = (1U << __ATA_QUIRK_NOLPM),
+ ATA_QUIRK_WD_BROKEN_LPM = (1U << __ATA_QUIRK_WD_BROKEN_LPM),
+ ATA_QUIRK_ZERO_AFTER_TRIM = (1U << __ATA_QUIRK_ZERO_AFTER_TRIM),
+ ATA_QUIRK_NO_DMA_LOG = (1U << __ATA_QUIRK_NO_DMA_LOG),
+ ATA_QUIRK_NOTRIM = (1U << __ATA_QUIRK_NOTRIM),
+ ATA_QUIRK_MAX_SEC_1024 = (1U << __ATA_QUIRK_MAX_SEC_1024),
+ ATA_QUIRK_MAX_TRIM_128M = (1U << __ATA_QUIRK_MAX_TRIM_128M),
+ ATA_QUIRK_NO_NCQ_ON_ATI = (1U << __ATA_QUIRK_NO_NCQ_ON_ATI),
+ ATA_QUIRK_NO_ID_DEV_LOG = (1U << __ATA_QUIRK_NO_ID_DEV_LOG),
+ ATA_QUIRK_NO_LOG_DIR = (1U << __ATA_QUIRK_NO_LOG_DIR),
+ ATA_QUIRK_NO_FUA = (1U << __ATA_QUIRK_NO_FUA),
+
+ /* User visible DMA mask for DMA control. DO NOT renumber. */
ATA_DMA_MASK_ATA = (1 << 0), /* DMA on ATA Disk */
ATA_DMA_MASK_ATAPI = (1 << 1), /* DMA on ATAPI */
ATA_DMA_MASK_CFA = (1 << 2), /* DMA on CF Card */
@@ -660,10 +700,25 @@ struct ata_cpr_log {
struct ata_cpr cpr[] __counted_by(nr_cpr);
};
+struct ata_cdl {
+ /*
+ * Buffer to cache the CDL log page 18h (command duration descriptors)
+ * for SCSI-ATA translation.
+ */
+ u8 desc_log_buf[ATA_LOG_CDL_SIZE];
+
+ /*
+ * Buffer to handle reading the sense data for successful NCQ Commands
+ * log page for commands using a CDL with one of the limits policy set
+ * to 0xD (successful completion with sense data available bit set).
+ */
+ u8 ncq_sense_log_buf[ATA_LOG_SENSE_NCQ_SIZE];
+};
+
struct ata_device {
struct ata_link *link;
unsigned int devno; /* 0 or 1 */
- unsigned int horkage; /* List of broken features */
+ unsigned int quirks; /* List of broken features */
unsigned long flags; /* ATA_DFLAG_xxx */
struct scsi_device *sdev; /* attached SCSI device */
void *private_data;
@@ -722,13 +777,16 @@ struct ata_device {
/* Concurrent positioning ranges */
struct ata_cpr_log *cpr_log;
- /* Command Duration Limits log support */
- u8 cdl[ATA_LOG_CDL_SIZE];
+ /* Command Duration Limits support */
+ struct ata_cdl *cdl;
/* error history */
int spdn_cnt;
/* ering is CLEAR_END, read comment above CLEAR_END */
struct ata_ering ering;
+
+ /* For EH */
+ u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned;
};
/* Fields between ATA_DEVICE_CLEAR_BEGIN and ATA_DEVICE_CLEAR_END are
@@ -874,9 +932,6 @@ struct ata_port {
#ifdef CONFIG_ATA_ACPI
struct ata_acpi_gtm __acpi_init_gtm; /* use ata_acpi_init_gtm() */
#endif
- /* owned by EH */
- u8 *ncq_sense_buf;
- u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned;
};
/* The following initializer overrides a method to NULL whether one of
@@ -1064,8 +1119,6 @@ static inline bool ata_port_is_frozen(const struct ata_port *ap)
extern int ata_std_prereset(struct ata_link *link, unsigned long deadline);
extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
int (*check_ready)(struct ata_link *link));
-extern int sata_std_hardreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline);
extern void ata_std_postreset(struct ata_link *link, unsigned int *classes);
extern struct ata_host *ata_host_alloc(struct device *dev, int n_ports);
@@ -1129,7 +1182,6 @@ extern int ata_xfer_mode2shift(u8 xfer_mode);
extern const char *ata_mode_string(unsigned int xfer_mask);
extern unsigned int ata_id_xfermask(const u16 *id);
extern int ata_std_qc_defer(struct ata_queued_cmd *qc);
-extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc);
extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
unsigned int n_elem);
extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
@@ -1190,12 +1242,13 @@ extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val);
extern int sata_set_spd(struct ata_link *link);
+int sata_std_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
extern int sata_link_hardreset(struct ata_link *link,
const unsigned int *timing, unsigned long deadline,
bool *online, int (*check_ready)(struct ata_link *));
extern int sata_link_resume(struct ata_link *link, const unsigned int *params,
unsigned long deadline);
-extern int ata_eh_read_sense_success_ncq_log(struct ata_link *link);
extern void ata_eh_analyze_ncq_error(struct ata_link *link);
#else
static inline const unsigned int *
@@ -1217,6 +1270,11 @@ static inline int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
return -EOPNOTSUPP;
}
static inline int sata_set_spd(struct ata_link *link) { return -EOPNOTSUPP; }
+static inline int sata_std_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ return -EOPNOTSUPP;
+}
static inline int sata_link_hardreset(struct ata_link *link,
const unsigned int *timing,
unsigned long deadline,
@@ -1233,10 +1291,6 @@ static inline int sata_link_resume(struct ata_link *link,
{
return -EOPNOTSUPP;
}
-static inline int ata_eh_read_sense_success_ncq_log(struct ata_link *link)
-{
- return -EOPNOTSUPP;
-}
static inline void ata_eh_analyze_ncq_error(struct ata_link *link) { }
#endif
extern int sata_link_debounce(struct ata_link *link,
@@ -1967,7 +2021,6 @@ extern unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc,
extern unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc,
unsigned char *buf, unsigned int buflen, int rw);
extern void ata_sff_irq_on(struct ata_port *ap);
-extern void ata_sff_irq_clear(struct ata_port *ap);
extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
u8 status, int in_wq);
extern void ata_sff_queue_work(struct work_struct *work);
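A short sketch of how the renamed per-device flags are consulted (dev->quirks replaces dev->horkage; the helper name is illustrative):

static bool example_ncq_allowed(struct ata_device *dev)
{
	/* Quirk bits are tested exactly as the old horkage bits were. */
	return !(dev->quirks & ATA_QUIRK_NONCQ);
}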
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 1b95fe31051f..61c4b9c41904 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -200,7 +200,7 @@ extern const struct svc_procedure nlmsvc_procedures[24];
extern const struct svc_procedure nlmsvc_procedures4[24];
#endif
extern int nlmsvc_grace_period;
-extern unsigned long nlmsvc_timeout;
+extern unsigned long nlm_timeout;
extern bool nsm_use_hostnames;
extern u32 nsm_local_state;
diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h
index c9afcdd9324c..ff82ef85a084 100644
--- a/include/linux/lru_cache.h
+++ b/include/linux/lru_cache.h
@@ -119,7 +119,7 @@ write intent log information, three of which are mentioned here.
*/
/* this defines an element in a tracked set
- * .colision is for hash table lookup.
+ * .collision is for hash table lookup.
* When we process a new IO request, we know its sector, thus can deduce the
* region number (label) easily. To do the label -> object lookup without a
* full list walk, we use a simple hash table.
@@ -145,7 +145,7 @@ write intent log information, three of which are mentioned here.
* But it avoids high order page allocations in kmalloc.
*/
struct lc_element {
- struct hlist_node colision;
+ struct hlist_node collision;
struct list_head list; /* LRU list or free list */
unsigned refcnt;
/* back "pointer" into lc_cache->element[index],
diff --git a/include/linux/lsm_count.h b/include/linux/lsm_count.h
new file mode 100644
index 000000000000..16eb49761b25
--- /dev/null
+++ b/include/linux/lsm_count.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2023 Google LLC.
+ */
+
+#ifndef __LINUX_LSM_COUNT_H
+#define __LINUX_LSM_COUNT_H
+
+#include <linux/args.h>
+
+#ifdef CONFIG_SECURITY
+
+/*
+ * Macros to count the number of LSMs enabled in the kernel at compile time.
+ */
+
+/*
+ * Capabilities is enabled when CONFIG_SECURITY is enabled.
+ */
+#if IS_ENABLED(CONFIG_SECURITY)
+#define CAPABILITIES_ENABLED 1,
+#else
+#define CAPABILITIES_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_SELINUX)
+#define SELINUX_ENABLED 1,
+#else
+#define SELINUX_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_SMACK)
+#define SMACK_ENABLED 1,
+#else
+#define SMACK_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_APPARMOR)
+#define APPARMOR_ENABLED 1,
+#else
+#define APPARMOR_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_TOMOYO)
+#define TOMOYO_ENABLED 1,
+#else
+#define TOMOYO_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_YAMA)
+#define YAMA_ENABLED 1,
+#else
+#define YAMA_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_LOADPIN)
+#define LOADPIN_ENABLED 1,
+#else
+#define LOADPIN_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_LOCKDOWN_LSM)
+#define LOCKDOWN_ENABLED 1,
+#else
+#define LOCKDOWN_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_SAFESETID)
+#define SAFESETID_ENABLED 1,
+#else
+#define SAFESETID_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_BPF_LSM)
+#define BPF_LSM_ENABLED 1,
+#else
+#define BPF_LSM_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_LANDLOCK)
+#define LANDLOCK_ENABLED 1,
+#else
+#define LANDLOCK_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_IMA)
+#define IMA_ENABLED 1,
+#else
+#define IMA_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_EVM)
+#define EVM_ENABLED 1,
+#else
+#define EVM_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_IPE)
+#define IPE_ENABLED 1,
+#else
+#define IPE_ENABLED
+#endif
+
+/*
+ * There is a trailing comma that needs to be accounted for. This is done by
+ * using a skipped argument in __COUNT_LSMS.
+ */
+#define __COUNT_LSMS(skipped_arg, args...) COUNT_ARGS(args...)
+#define COUNT_LSMS(args...) __COUNT_LSMS(args)
+
+#define MAX_LSM_COUNT \
+ COUNT_LSMS( \
+ CAPABILITIES_ENABLED \
+ SELINUX_ENABLED \
+ SMACK_ENABLED \
+ APPARMOR_ENABLED \
+ TOMOYO_ENABLED \
+ YAMA_ENABLED \
+ LOADPIN_ENABLED \
+ LOCKDOWN_ENABLED \
+ SAFESETID_ENABLED \
+ BPF_LSM_ENABLED \
+ LANDLOCK_ENABLED \
+ IMA_ENABLED \
+ EVM_ENABLED \
+ IPE_ENABLED)
+
+#else
+
+#define MAX_LSM_COUNT 0
+
+#endif /* CONFIG_SECURITY */
+
+#endif /* __LINUX_LSM_COUNT_H */
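To make the counting trick concrete, consider a hypothetical build with only capabilities, SELinux and the BPF LSM enabled (illustrative expansion only):

/*
 * The argument text handed to COUNT_LSMS() becomes "1, 1, 1," - one "1,"
 * per enabled LSM. The trailing comma yields an extra, empty argument,
 * and __COUNT_LSMS() discards its first argument, so the two cancel out
 * and COUNT_ARGS() sees exactly three arguments: MAX_LSM_COUNT == 3.
 */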
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index 855db460e08b..9eca013aa5e1 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -48,7 +48,7 @@ LSM_HOOK(int, 0, quota_on, struct dentry *dentry)
LSM_HOOK(int, 0, syslog, int type)
LSM_HOOK(int, 0, settime, const struct timespec64 *ts,
const struct timezone *tz)
-LSM_HOOK(int, 1, vm_enough_memory, struct mm_struct *mm, long pages)
+LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages)
LSM_HOOK(int, 0, bprm_creds_for_exec, struct linux_binprm *bprm)
LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, const struct file *file)
LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm)
@@ -114,6 +114,7 @@ LSM_HOOK(int, 0, path_notify, const struct path *path, u64 mask,
unsigned int obj_type)
LSM_HOOK(int, 0, inode_alloc_security, struct inode *inode)
LSM_HOOK(void, LSM_RET_VOID, inode_free_security, struct inode *inode)
+LSM_HOOK(void, LSM_RET_VOID, inode_free_security_rcu, void *inode_security)
LSM_HOOK(int, -EOPNOTSUPP, inode_init_security, struct inode *inode,
struct inode *dir, const struct qstr *qstr, struct xattr *xattrs,
int *xattr_count)
@@ -179,6 +180,8 @@ LSM_HOOK(void, LSM_RET_VOID, inode_getsecid, struct inode *inode, u32 *secid)
LSM_HOOK(int, 0, inode_copy_up, struct dentry *src, struct cred **new)
LSM_HOOK(int, -EOPNOTSUPP, inode_copy_up_xattr, struct dentry *src,
const char *name)
+LSM_HOOK(int, 0, inode_setintegrity, const struct inode *inode,
+ enum lsm_integrity_type type, const void *value, size_t size)
LSM_HOOK(int, 0, kernfs_init_security, struct kernfs_node *kn_dir,
struct kernfs_node *kn)
LSM_HOOK(int, 0, file_permission, struct file *file, int mask)
@@ -353,8 +356,7 @@ LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_inc, void)
LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_dec, void)
LSM_HOOK(void, LSM_RET_VOID, req_classify_flow, const struct request_sock *req,
struct flowi_common *flic)
-LSM_HOOK(int, 0, tun_dev_alloc_security, void **security)
-LSM_HOOK(void, LSM_RET_VOID, tun_dev_free_security, void *security)
+LSM_HOOK(int, 0, tun_dev_alloc_security, void *security)
LSM_HOOK(int, 0, tun_dev_create, void)
LSM_HOOK(int, 0, tun_dev_attach_queue, void *security)
LSM_HOOK(int, 0, tun_dev_attach, struct sock *sk, void *security)
@@ -374,8 +376,7 @@ LSM_HOOK(int, 0, mptcp_add_subflow, struct sock *sk, struct sock *ssk)
LSM_HOOK(int, 0, ib_pkey_access, void *sec, u64 subnet_prefix, u16 pkey)
LSM_HOOK(int, 0, ib_endport_manage_subnet, void *sec, const char *dev_name,
u8 port_num)
-LSM_HOOK(int, 0, ib_alloc_security, void **sec)
-LSM_HOOK(void, LSM_RET_VOID, ib_free_security, void *sec)
+LSM_HOOK(int, 0, ib_alloc_security, void *sec)
#endif /* CONFIG_SECURITY_INFINIBAND */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -403,7 +404,6 @@ LSM_HOOK(int, 0, xfrm_decode_session, struct sk_buff *skb, u32 *secid,
#ifdef CONFIG_KEYS
LSM_HOOK(int, 0, key_alloc, struct key *key, const struct cred *cred,
unsigned long flags)
-LSM_HOOK(void, LSM_RET_VOID, key_free, struct key *key)
LSM_HOOK(int, 0, key_permission, key_ref_t key_ref, const struct cred *cred,
enum key_need_perm need_perm)
LSM_HOOK(int, 0, key_getsecurity, struct key *key, char **buffer)
@@ -431,7 +431,7 @@ LSM_HOOK(int, 0, bpf_prog_load, struct bpf_prog *prog, union bpf_attr *attr,
struct bpf_token *token)
LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free, struct bpf_prog *prog)
LSM_HOOK(int, 0, bpf_token_create, struct bpf_token *token, union bpf_attr *attr,
- struct path *path)
+ const struct path *path)
LSM_HOOK(void, LSM_RET_VOID, bpf_token_free, struct bpf_token *token)
LSM_HOOK(int, 0, bpf_token_cmd, const struct bpf_token *token, enum bpf_cmd cmd)
LSM_HOOK(int, 0, bpf_token_capable, const struct bpf_token *token, int cap)
@@ -442,7 +442,6 @@ LSM_HOOK(int, 0, locked_down, enum lockdown_reason what)
#ifdef CONFIG_PERF_EVENTS
LSM_HOOK(int, 0, perf_event_open, struct perf_event_attr *attr, int type)
LSM_HOOK(int, 0, perf_event_alloc, struct perf_event *event)
-LSM_HOOK(void, LSM_RET_VOID, perf_event_free, struct perf_event *event)
LSM_HOOK(int, 0, perf_event_read, struct perf_event *event)
LSM_HOOK(int, 0, perf_event_write, struct perf_event *event)
#endif /* CONFIG_PERF_EVENTS */
@@ -452,3 +451,10 @@ LSM_HOOK(int, 0, uring_override_creds, const struct cred *new)
LSM_HOOK(int, 0, uring_sqpoll, void)
LSM_HOOK(int, 0, uring_cmd, struct io_uring_cmd *ioucmd)
#endif /* CONFIG_IO_URING */
+
+LSM_HOOK(void, LSM_RET_VOID, initramfs_populated, void)
+
+LSM_HOOK(int, 0, bdev_alloc_security, struct block_device *bdev)
+LSM_HOOK(void, LSM_RET_VOID, bdev_free_security, struct block_device *bdev)
+LSM_HOOK(int, 0, bdev_setintegrity, struct block_device *bdev,
+ enum lsm_integrity_type type, const void *value, size_t size)
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index a2ade0ffe9e7..090d1d3e19fe 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -30,19 +30,47 @@
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/xattr.h>
+#include <linux/static_call.h>
+#include <linux/unroll.h>
+#include <linux/jump_label.h>
+#include <linux/lsm_count.h>
union security_list_options {
#define LSM_HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__);
#include "lsm_hook_defs.h"
#undef LSM_HOOK
+ void *lsm_func_addr;
};
-struct security_hook_heads {
- #define LSM_HOOK(RET, DEFAULT, NAME, ...) struct hlist_head NAME;
- #include "lsm_hook_defs.h"
- #undef LSM_HOOK
+/*
+ * @key: static call key as defined by STATIC_CALL_KEY
+ * @trampoline: static call trampoline as defined by STATIC_CALL_TRAMP
+ * @hl: The security_hook_list as initialized by the owning LSM.
+ * @active: Enabled when the static call has an LSM hook associated.
+ */
+struct lsm_static_call {
+ struct static_call_key *key;
+ void *trampoline;
+ struct security_hook_list *hl;
+ /* this needs to be true or false based on what the key defaults to */
+ struct static_key_false *active;
} __randomize_layout;
+/*
+ * Table of the static calls for each LSM hook.
+ * Once the LSMs are initialized, their callbacks will be copied to these
+ * tables such that the calls are filled backwards (from last to first).
+ * This way, we can jump directly to the first used static call and execute
+ * all of the calls that follow it. This essentially makes the entry point
+ * dynamic, adapting the number of static calls used to the number of callbacks.
+ */
+struct lsm_static_calls_table {
+ #define LSM_HOOK(RET, DEFAULT, NAME, ...) \
+ struct lsm_static_call NAME[MAX_LSM_COUNT];
+ #include <linux/lsm_hook_defs.h>
+ #undef LSM_HOOK
+} __packed __randomize_layout;
+
/**
* struct lsm_id - Identify a Linux Security Module.
* @lsm: name of the LSM, must be approved by the LSM maintainers
@@ -51,53 +79,45 @@ struct security_hook_heads {
* Contains the information that identifies the LSM.
*/
struct lsm_id {
- const char *name;
- u64 id;
+ const char *name;
+ u64 id;
};
/*
* Security module hook list structure.
* For use with generic list macros for common operations.
+ *
+ * struct security_hook_list - Contents of a cacheable, mappable object.
+ * @scalls: The beginning of the array of static calls assigned to this hook.
+ * @hook: The callback for the hook.
+ * @lsmid: The identity structure of the LSM that owns this hook.
*/
struct security_hook_list {
- struct hlist_node list;
- struct hlist_head *head;
- union security_list_options hook;
- const struct lsm_id *lsmid;
+ struct lsm_static_call *scalls;
+ union security_list_options hook;
+ const struct lsm_id *lsmid;
} __randomize_layout;
/*
* Security blob size or offset data.
*/
struct lsm_blob_sizes {
- int lbs_cred;
- int lbs_file;
- int lbs_inode;
- int lbs_superblock;
- int lbs_ipc;
- int lbs_msg_msg;
- int lbs_task;
- int lbs_xattr_count; /* number of xattr slots in new_xattrs array */
+ int lbs_cred;
+ int lbs_file;
+ int lbs_ib;
+ int lbs_inode;
+ int lbs_sock;
+ int lbs_superblock;
+ int lbs_ipc;
+ int lbs_key;
+ int lbs_msg_msg;
+ int lbs_perf_event;
+ int lbs_task;
+ int lbs_xattr_count; /* number of xattr slots in new_xattrs array */
+ int lbs_tun_dev;
+ int lbs_bdev;
};
-/**
- * lsm_get_xattr_slot - Return the next available slot and increment the index
- * @xattrs: array storing LSM-provided xattrs
- * @xattr_count: number of already stored xattrs (updated)
- *
- * Retrieve the first available slot in the @xattrs array to fill with an xattr,
- * and increment @xattr_count.
- *
- * Return: The slot to fill in @xattrs if non-NULL, NULL otherwise.
- */
-static inline struct xattr *lsm_get_xattr_slot(struct xattr *xattrs,
- int *xattr_count)
-{
- if (unlikely(!xattrs))
- return NULL;
- return &xattrs[(*xattr_count)++];
-}
-
/*
* LSM_RET_VOID is used as the default value in LSM_HOOK definitions for void
* LSM hooks (in include/linux/lsm_hook_defs.h).
@@ -110,11 +130,11 @@ static inline struct xattr *lsm_get_xattr_slot(struct xattr *xattrs,
* care of the common case and reduces the amount of
* text involved.
*/
-#define LSM_HOOK_INIT(HEAD, HOOK) \
- { .head = &security_hook_heads.HEAD, .hook = { .HEAD = HOOK } }
-
-extern struct security_hook_heads security_hook_heads;
-extern char *lsm_names;
+#define LSM_HOOK_INIT(NAME, HOOK) \
+ { \
+ .scalls = static_calls_table.NAME, \
+ .hook = { .NAME = HOOK } \
+ }
extern void security_add_hooks(struct security_hook_list *hooks, int count,
const struct lsm_id *lsmid);
@@ -137,9 +157,6 @@ struct lsm_info {
struct lsm_blob_sizes *blobs; /* Optional: for blob sharing. */
};
-extern struct lsm_info __start_lsm_info[], __end_lsm_info[];
-extern struct lsm_info __start_early_lsm_info[], __end_early_lsm_info[];
-
#define DEFINE_LSM(lsm) \
static struct lsm_info __lsm_##lsm \
__used __section(".lsm_info.init") \
@@ -150,6 +167,28 @@ extern struct lsm_info __start_early_lsm_info[], __end_early_lsm_info[];
__used __section(".early_lsm_info.init") \
__aligned(sizeof(unsigned long))
-extern int lsm_inode_alloc(struct inode *inode);
+/* DO NOT tamper with these variables outside of the LSM framework */
+extern char *lsm_names;
+extern struct lsm_static_calls_table static_calls_table __ro_after_init;
+extern struct lsm_info __start_lsm_info[], __end_lsm_info[];
+extern struct lsm_info __start_early_lsm_info[], __end_early_lsm_info[];
+
+/**
+ * lsm_get_xattr_slot - Return the next available slot and increment the index
+ * @xattrs: array storing LSM-provided xattrs
+ * @xattr_count: number of already stored xattrs (updated)
+ *
+ * Retrieve the first available slot in the @xattrs array to fill with an xattr,
+ * and increment @xattr_count.
+ *
+ * Return: The slot to fill in @xattrs if non-NULL, NULL otherwise.
+ */
+static inline struct xattr *lsm_get_xattr_slot(struct xattr *xattrs,
+ int *xattr_count)
+{
+ if (unlikely(!xattrs))
+ return NULL;
+ return &xattrs[(*xattr_count)++];
+}
#endif /* ! __LINUX_LSM_HOOKS_H */
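For context, a minimal sketch of how an LSM registers against the static-call table with the updated LSM_HOOK_INIT() (the hook body, LSM name and id are placeholders):

static int example_file_permission(struct file *file, int mask)
{
	return 0;	/* allow everything in this sketch */
}

static const struct lsm_id example_lsmid = {
	.name	= "example",
	.id	= 0,	/* placeholder; real ids come from uapi/linux/lsm.h */
};

static struct security_hook_list example_hooks[] __ro_after_init = {
	LSM_HOOK_INIT(file_permission, example_file_permission),
};

/* Registered via security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks), &example_lsmid); */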
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index a53ad4dabd7e..c2c11004085e 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -52,9 +52,9 @@
* bit in the node type. This is possible by using bit 1 to indicate if bit 2
* is part of the type or the slot.
*
- * Once the type is decided, the decision of an allocation range type or a range
- * type is done by examining the immutable tree flag for the MAPLE_ALLOC_RANGE
- * flag.
+ * Once the type is decided, the decision of an allocation range type or a
+ * range type is done by examining the immutable tree flag for the
+ * MT_FLAGS_ALLOC_RANGE flag.
*
* Node types:
* 0x??1 = Root
@@ -148,6 +148,18 @@ enum maple_type {
maple_arange_64,
};
+enum store_type {
+ wr_invalid,
+ wr_new_root,
+ wr_store_root,
+ wr_exact_fit,
+ wr_spanning_store,
+ wr_split_store,
+ wr_rebalance,
+ wr_append,
+ wr_node_store,
+ wr_slot_store,
+};
/**
* DOC: Maple tree flags
@@ -436,6 +448,7 @@ struct ma_state {
unsigned char offset;
unsigned char mas_flags;
unsigned char end; /* The end of the node */
+ enum store_type store_type; /* The type of store needed for this operation */
};
struct ma_wr_state {
@@ -477,6 +490,7 @@ struct ma_wr_state {
.max = ULONG_MAX, \
.alloc = NULL, \
.mas_flags = 0, \
+ .store_type = wr_invalid, \
}
#define MA_WR_STATE(name, ma_state, wr_entry) \
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index fc4d75c6cec3..673d5cae7c81 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -467,6 +467,7 @@ static inline __init_memblock bool memblock_bottom_up(void)
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
+unsigned long memblock_estimated_nr_free_pages(void);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 0e5bf25d324f..34d2da05f2f1 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -57,7 +57,7 @@ enum memcg_memory_event {
struct mem_cgroup_reclaim_cookie {
pg_data_t *pgdat;
- unsigned int generation;
+ int generation;
};
#ifdef CONFIG_MEMCG
@@ -70,6 +70,7 @@ struct mem_cgroup_id {
};
struct memcg_vmstats_percpu;
+struct memcg1_events_percpu;
struct memcg_vmstats;
struct lruvec_stats_percpu;
struct lruvec_stats;
@@ -77,7 +78,7 @@ struct lruvec_stats;
struct mem_cgroup_reclaim_iter {
struct mem_cgroup *position;
/* scan generation, increased every round-trip */
- unsigned int generation;
+ atomic_t generation;
};
/*
@@ -193,6 +194,11 @@ struct mem_cgroup {
struct page_counter memsw; /* v1 only */
};
+ /* registered local peak watchers */
+ struct list_head memory_peaks;
+ struct list_head swap_peaks;
+ spinlock_t peaks_lock;
+
/* Range enforcement for interrupt charges */
struct work_struct high_work;
@@ -270,6 +276,8 @@ struct mem_cgroup {
struct page_counter kmem; /* v1 only */
struct page_counter tcpmem; /* v1 only */
+ struct memcg1_events_percpu __percpu *events_percpu;
+
unsigned long soft_limit;
/* protected by memcg_oom_lock */
@@ -361,11 +369,11 @@ static inline bool folio_memcg_kmem(struct folio *folio);
* After the initialization objcg->memcg is always pointing at
* a valid memcg, but can be atomically swapped to the parent memcg.
*
- * The caller must ensure that the returned memcg won't be released:
- * e.g. acquire the rcu_read_lock or css_set_lock.
+ * The caller must ensure that the returned memcg won't be released.
*/
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
+ lockdep_assert_once(rcu_read_lock_held() || lockdep_is_held(&cgroup_mutex));
return READ_ONCE(objcg->memcg);
}
@@ -439,6 +447,19 @@ static inline struct mem_cgroup *folio_memcg(struct folio *folio)
return __folio_memcg(folio);
}
+/*
+ * folio_memcg_charged - Check whether a folio is charged to a memory cgroup.
+ * @folio: Pointer to the folio.
+ *
+ * Returns true if folio is charged to a memory cgroup, otherwise returns false.
+ */
+static inline bool folio_memcg_charged(struct folio *folio)
+{
+ if (folio_memcg_kmem(folio))
+ return __folio_objcg(folio) != NULL;
+ return __folio_memcg(folio) != NULL;
+}
+
/**
* folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
* @folio: Pointer to the folio.
@@ -455,7 +476,6 @@ static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
unsigned long memcg_data = READ_ONCE(folio->memcg_data);
VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
- WARN_ON_ONCE(!rcu_read_lock_held());
if (memcg_data & MEMCG_DATA_KMEM) {
struct obj_cgroup *objcg;
@@ -464,6 +484,8 @@ static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
return obj_cgroup_memcg(objcg);
}
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
}
@@ -677,7 +699,8 @@ int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
gfp_t gfp, swp_entry_t entry);
-void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
+
+void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
void __mem_cgroup_uncharge(struct folio *folio);
@@ -762,6 +785,8 @@ struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
struct mem_cgroup *get_mem_cgroup_from_current(void);
+struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio);
+
struct lruvec *folio_lruvec_lock(struct folio *folio);
struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
@@ -1006,8 +1031,8 @@ static inline void count_memcg_folio_events(struct folio *folio,
count_memcg_events(memcg, idx, nr);
}
-static inline void count_memcg_event_mm(struct mm_struct *mm,
- enum vm_event_item idx)
+static inline void count_memcg_events_mm(struct mm_struct *mm,
+ enum vm_event_item idx, unsigned long count)
{
struct mem_cgroup *memcg;
@@ -1017,10 +1042,16 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
rcu_read_lock();
memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
if (likely(memcg))
- count_memcg_events(memcg, idx, 1);
+ count_memcg_events(memcg, idx, count);
rcu_read_unlock();
}
+static inline void count_memcg_event_mm(struct mm_struct *mm,
+ enum vm_event_item idx)
+{
+ count_memcg_events_mm(mm, idx, 1);
+}
+
static inline void memcg_memory_event(struct mem_cgroup *memcg,
enum memcg_memory_event event)
{
@@ -1176,7 +1207,7 @@ static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
return 0;
}
-static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
+static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr)
{
}
@@ -1240,6 +1271,11 @@ static inline struct mem_cgroup *get_mem_cgroup_from_current(void)
return NULL;
}
+static inline struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
+{
+ return NULL;
+}
+
static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
@@ -1462,6 +1498,11 @@ static inline void count_memcg_folio_events(struct folio *folio,
{
}
+static inline void count_memcg_events_mm(struct mm_struct *mm,
+ enum vm_event_item idx, unsigned long count)
+{
+}
+
static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
@@ -1717,7 +1758,6 @@ static inline int memcg_kmem_id(struct mem_cgroup *memcg)
return memcg ? memcg->kmemcg_id : -1;
}
-struct mem_cgroup *mem_cgroup_from_obj(void *p);
struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);
static inline void count_objcg_event(struct obj_cgroup *objcg,
@@ -1780,11 +1820,6 @@ static inline int memcg_kmem_id(struct mem_cgroup *memcg)
return -1;
}
-static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
-{
- return NULL;
-}
-
static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
{
return NULL;
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index ebe876930e78..b27ddce5d324 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -16,54 +16,6 @@ struct resource;
struct vmem_altmap;
struct dev_pagemap;
-#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
-/*
- * For supporting node-hotadd, we have to allocate a new pgdat.
- *
- * If an arch has generic style NODE_DATA(),
- * node_data[nid] = kzalloc() works well. But it depends on the architecture.
- *
- * In general, generic_alloc_nodedata() is used.
- *
- */
-extern pg_data_t *arch_alloc_nodedata(int nid);
-extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
-
-#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
-
-#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid)
-
-#ifdef CONFIG_NUMA
-/*
- * XXX: node aware allocation can't work well to get new node's memory at this time.
- * Because, pgdat for the new node is not allocated/initialized yet itself.
- * To use new node's memory, more consideration will be necessary.
- */
-#define generic_alloc_nodedata(nid) \
-({ \
- memblock_alloc(sizeof(*pgdat), SMP_CACHE_BYTES); \
-})
-
-extern pg_data_t *node_data[];
-static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
-{
- node_data[nid] = pgdat;
-}
-
-#else /* !CONFIG_NUMA */
-
-/* never called */
-static inline pg_data_t *generic_alloc_nodedata(int nid)
-{
- BUG();
- return NULL;
-}
-static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
-{
-}
-#endif /* CONFIG_NUMA */
-#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
-
#ifdef CONFIG_MEMORY_HOTPLUG
struct page *pfn_to_online_page(unsigned long pfn);
diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h
index def5df6e74bf..551ef1c367d6 100644
--- a/include/linux/mfd/88pm80x.h
+++ b/include/linux/mfd/88pm80x.h
@@ -294,7 +294,7 @@ struct pm80x_chip {
struct i2c_client *client;
struct i2c_client *companion;
struct regmap *regmap;
- struct regmap_irq_chip *regmap_irq_chip;
+ const struct regmap_irq_chip *regmap_irq_chip;
struct regmap_irq_chip_data *irq_data;
int type;
int irq;
diff --git a/include/linux/mfd/adp5585.h b/include/linux/mfd/adp5585.h
new file mode 100644
index 000000000000..016033cd68e4
--- /dev/null
+++ b/include/linux/mfd/adp5585.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Analog Devices ADP5585 I/O expander, PWM controller and keypad controller
+ *
+ * Copyright 2022 NXP
+ * Copyright 2024 Ideas on Board Oy
+ */
+
+#ifndef __MFD_ADP5585_H_
+#define __MFD_ADP5585_H_
+
+#include <linux/bits.h>
+
+#define ADP5585_ID 0x00
+#define ADP5585_MAN_ID_VALUE 0x20
+#define ADP5585_MAN_ID_MASK GENMASK(7, 4)
+#define ADP5585_INT_STATUS 0x01
+#define ADP5585_STATUS 0x02
+#define ADP5585_FIFO_1 0x03
+#define ADP5585_FIFO_2 0x04
+#define ADP5585_FIFO_3 0x05
+#define ADP5585_FIFO_4 0x06
+#define ADP5585_FIFO_5 0x07
+#define ADP5585_FIFO_6 0x08
+#define ADP5585_FIFO_7 0x09
+#define ADP5585_FIFO_8 0x0a
+#define ADP5585_FIFO_9 0x0b
+#define ADP5585_FIFO_10 0x0c
+#define ADP5585_FIFO_11 0x0d
+#define ADP5585_FIFO_12 0x0e
+#define ADP5585_FIFO_13 0x0f
+#define ADP5585_FIFO_14 0x10
+#define ADP5585_FIFO_15 0x11
+#define ADP5585_FIFO_16 0x12
+#define ADP5585_GPI_INT_STAT_A 0x13
+#define ADP5585_GPI_INT_STAT_B 0x14
+#define ADP5585_GPI_STATUS_A 0x15
+#define ADP5585_GPI_STATUS_B 0x16
+#define ADP5585_RPULL_CONFIG_A 0x17
+#define ADP5585_RPULL_CONFIG_B 0x18
+#define ADP5585_RPULL_CONFIG_C 0x19
+#define ADP5585_RPULL_CONFIG_D 0x1a
+#define ADP5585_Rx_PULL_CFG_PU_300K 0
+#define ADP5585_Rx_PULL_CFG_PD_300K 1
+#define ADP5585_Rx_PULL_CFG_PU_100K 2
+#define ADP5585_Rx_PULL_CFG_DISABLE 3
+#define ADP5585_Rx_PULL_CFG_MASK 3
+#define ADP5585_GPI_INT_LEVEL_A 0x1b
+#define ADP5585_GPI_INT_LEVEL_B 0x1c
+#define ADP5585_GPI_EVENT_EN_A 0x1d
+#define ADP5585_GPI_EVENT_EN_B 0x1e
+#define ADP5585_GPI_INTERRUPT_EN_A 0x1f
+#define ADP5585_GPI_INTERRUPT_EN_B 0x20
+#define ADP5585_DEBOUNCE_DIS_A 0x21
+#define ADP5585_DEBOUNCE_DIS_B 0x22
+#define ADP5585_GPO_DATA_OUT_A 0x23
+#define ADP5585_GPO_DATA_OUT_B 0x24
+#define ADP5585_GPO_OUT_MODE_A 0x25
+#define ADP5585_GPO_OUT_MODE_B 0x26
+#define ADP5585_GPIO_DIRECTION_A 0x27
+#define ADP5585_GPIO_DIRECTION_B 0x28
+#define ADP5585_RESET1_EVENT_A 0x29
+#define ADP5585_RESET1_EVENT_B 0x2a
+#define ADP5585_RESET1_EVENT_C 0x2b
+#define ADP5585_RESET2_EVENT_A 0x2c
+#define ADP5585_RESET2_EVENT_B 0x2d
+#define ADP5585_RESET_CFG 0x2e
+#define ADP5585_PWM_OFFT_LOW 0x2f
+#define ADP5585_PWM_OFFT_HIGH 0x30
+#define ADP5585_PWM_ONT_LOW 0x31
+#define ADP5585_PWM_ONT_HIGH 0x32
+#define ADP5585_PWM_CFG 0x33
+#define ADP5585_PWM_IN_AND BIT(2)
+#define ADP5585_PWM_MODE BIT(1)
+#define ADP5585_PWM_EN BIT(0)
+#define ADP5585_LOGIC_CFG 0x34
+#define ADP5585_LOGIC_FF_CFG 0x35
+#define ADP5585_LOGIC_INT_EVENT_EN 0x36
+#define ADP5585_POLL_PTIME_CFG 0x37
+#define ADP5585_PIN_CONFIG_A 0x38
+#define ADP5585_PIN_CONFIG_B 0x39
+#define ADP5585_PIN_CONFIG_C 0x3a
+#define ADP5585_PULL_SELECT BIT(7)
+#define ADP5585_C4_EXTEND_CFG_GPIO11 (0U << 6)
+#define ADP5585_C4_EXTEND_CFG_RESET2 (1U << 6)
+#define ADP5585_C4_EXTEND_CFG_MASK GENMASK(6, 6)
+#define ADP5585_R4_EXTEND_CFG_GPIO5 (0U << 5)
+#define ADP5585_R4_EXTEND_CFG_RESET1 (1U << 5)
+#define ADP5585_R4_EXTEND_CFG_MASK GENMASK(5, 5)
+#define ADP5585_R3_EXTEND_CFG_GPIO4 (0U << 2)
+#define ADP5585_R3_EXTEND_CFG_LC (1U << 2)
+#define ADP5585_R3_EXTEND_CFG_PWM_OUT (2U << 2)
+#define ADP5585_R3_EXTEND_CFG_MASK GENMASK(3, 2)
+#define ADP5585_R0_EXTEND_CFG_GPIO1 (0U << 0)
+#define ADP5585_R0_EXTEND_CFG_LY (1U << 0)
+#define ADP5585_R0_EXTEND_CFG_MASK GENMASK(0, 0)
+#define ADP5585_GENERAL_CFG 0x3b
+#define ADP5585_OSC_EN BIT(7)
+#define ADP5585_OSC_FREQ_50KHZ (0U << 5)
+#define ADP5585_OSC_FREQ_100KHZ (1U << 5)
+#define ADP5585_OSC_FREQ_200KHZ (2U << 5)
+#define ADP5585_OSC_FREQ_500KHZ (3U << 5)
+#define ADP5585_OSC_FREQ_MASK GENMASK(6, 5)
+#define ADP5585_INT_CFG BIT(1)
+#define ADP5585_RST_CFG BIT(0)
+#define ADP5585_INT_EN 0x3c
+
+#define ADP5585_MAX_REG ADP5585_INT_EN
+
+/*
+ * Bank 0 covers pins "GPIO 1/R0" to "GPIO 6/R5", numbered 0 to 5 by the
+ * driver, and bank 1 covers pins "GPIO 7/C0" to "GPIO 11/C4", numbered 6 to
+ * 10. Some variants of the ADP5585 don't support "GPIO 6/R5". As the driver
+ * uses identical GPIO numbering for all variants to avoid confusion, GPIO 5 is
+ * marked as reserved in the device tree for variants that don't support it.
+ */
+#define ADP5585_BANK(n) ((n) >= 6 ? 1 : 0)
+#define ADP5585_BIT(n) ((n) >= 6 ? BIT((n) - 6) : BIT(n))
+
+struct regmap;
+
+struct adp5585_dev {
+ struct regmap *regmap;
+};
+
+#endif
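To make the split pin numbering concrete, a hypothetical helper using the bank/bit macros could look like this (the GPO data-out registers A/B are adjacent, so the bank index can be added directly):

static int example_gpio_set(struct regmap *regmap, unsigned int off, int value)
{
	/* Offsets 0..5 map to register A, offsets 6..10 to register B. */
	unsigned int reg = ADP5585_GPO_DATA_OUT_A + ADP5585_BANK(off);
	unsigned int bit = ADP5585_BIT(off);

	return regmap_update_bits(regmap, reg, bit, value ? bit : 0);
}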
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index 8c0a33a2e9ce..f4dfc1871a95 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -115,6 +115,16 @@ enum axp20x_variants {
#define AXP313A_IRQ_STATE 0x21
#define AXP717_ON_INDICATE 0x00
+#define AXP717_PMU_STATUS_2 0x01
+#define AXP717_BC_DETECT 0x05
+#define AXP717_PMU_FAULT 0x08
+#define AXP717_MODULE_EN_CONTROL_1 0x0b
+#define AXP717_MIN_SYS_V_CONTROL 0x15
+#define AXP717_INPUT_VOL_LIMIT_CTRL 0x16
+#define AXP717_INPUT_CUR_LIMIT_CTRL 0x17
+#define AXP717_MODULE_EN_CONTROL_2 0x19
+#define AXP717_BOOST_CONTROL 0x1e
+#define AXP717_VSYS_V_POWEROFF 0x24
#define AXP717_IRQ0_EN 0x40
#define AXP717_IRQ1_EN 0x41
#define AXP717_IRQ2_EN 0x42
@@ -125,6 +135,9 @@ enum axp20x_variants {
#define AXP717_IRQ2_STATE 0x4a
#define AXP717_IRQ3_STATE 0x4b
#define AXP717_IRQ4_STATE 0x4c
+#define AXP717_ICC_CHG_SET 0x62
+#define AXP717_ITERM_CHG_SET 0x63
+#define AXP717_CV_CHG_SET 0x64
#define AXP717_DCDC_OUTPUT_CONTROL 0x80
#define AXP717_DCDC1_CONTROL 0x83
#define AXP717_DCDC2_CONTROL 0x84
@@ -145,6 +158,19 @@ enum axp20x_variants {
#define AXP717_CLDO3_CONTROL 0x9d
#define AXP717_CLDO4_CONTROL 0x9e
#define AXP717_CPUSLDO_CONTROL 0x9f
+#define AXP717_BATT_PERCENT_DATA 0xa4
+#define AXP717_ADC_CH_EN_CONTROL 0xc0
+#define AXP717_BATT_V_H 0xc4
+#define AXP717_BATT_V_L 0xc5
+#define AXP717_VBUS_V_H 0xc6
+#define AXP717_VBUS_V_L 0xc7
+#define AXP717_VSYS_V_H 0xc8
+#define AXP717_VSYS_V_L 0xc9
+#define AXP717_BATT_CHRG_I_H 0xca
+#define AXP717_BATT_CHRG_I_L 0xcb
+#define AXP717_ADC_DATA_SEL 0xcd
+#define AXP717_ADC_DATA_H 0xce
+#define AXP717_ADC_DATA_L 0xcf
#define AXP806_STARTUP_SRC 0x00
#define AXP806_CHIP_ID 0x03
@@ -484,6 +510,7 @@ enum {
AXP717_CLDO3,
AXP717_CLDO4,
AXP717_CPUSLDO,
+ AXP717_BOOST,
AXP717_REG_ID_MAX,
};
diff --git a/include/linux/mfd/ds1wm.h b/include/linux/mfd/ds1wm.h
deleted file mode 100644
index 43dfca1c9702..000000000000
--- a/include/linux/mfd/ds1wm.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* MFD cell driver data for the DS1WM driver
- *
- * to be defined in the MFD device that is
- * using this driver for one of his sub devices
- */
-
-struct ds1wm_driver_data {
- int active_high;
- int clock_rate;
- /* in milliseconds, the amount of time to
- * sleep following a reset pulse. Zero
- * should work if your bus devices recover
- * time respects the 1-wire spec since the
- * ds1wm implements the precise timings of
- * a reset pulse/presence detect sequence.
- */
- unsigned int reset_recover_delay;
-
- /* Say 1 here for big endian Hardware
- * (only relevant with bus-shift > 0
- */
- bool is_hw_big_endian;
-
- /* left shift of register number to get register address offsett.
- * Only 0,1,2 allowed for 8,16 or 32 bit bus width respectively
- */
- unsigned int bus_shift;
-};
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 54444ff2a5de..20c5e02ed9da 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -217,6 +217,10 @@ enum max77693_charger_battery_state {
#define CHG_CNFG_01_CHGRSTRT_MASK (0x3 << CHG_CNFG_01_CHGRSTRT_SHIFT)
#define CHG_CNFG_01_PQEN_MAKS BIT(CHG_CNFG_01_PQEN_SHIFT)
+/* MAX77693_CHG_REG_CHG_CNFG_02 register */
+#define CHG_CNFG_02_CC_SHIFT 0
+#define CHG_CNFG_02_CC_MASK 0x3F
+
/* MAX77693_CHG_REG_CHG_CNFG_03 register */
#define CHG_CNFG_03_TOITH_SHIFT 0
#define CHG_CNFG_03_TOTIME_SHIFT 3
@@ -244,6 +248,7 @@ enum max77693_charger_battery_state {
#define CHG_CNFG_12_VCHGINREG_MASK (0x3 << CHG_CNFG_12_VCHGINREG_SHIFT)
/* MAX77693 CHG_CNFG_09 Register */
+#define CHG_CNFG_09_CHGIN_ILIM_SHIFT 0
#define CHG_CNFG_09_CHGIN_ILIM_MASK 0x7F
/* MAX77693 CHG_CTRL Register */
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 644be30b69c8..002e49b2ebd9 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -70,6 +70,7 @@ int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
unsigned int *ret_succeeded);
struct folio *alloc_migration_target(struct folio *src, unsigned long private);
bool isolate_movable_page(struct page *page, isolate_mode_t mode);
+bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
int migrate_huge_page_move_mapping(struct address_space *mapping,
struct folio *dst, struct folio *src);
@@ -91,6 +92,8 @@ static inline struct folio *alloc_migration_target(struct folio *src,
{ return NULL; }
static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
{ return false; }
+static inline bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
+ { return false; }
static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
struct folio *dst, struct folio *src)
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index d0f7d1f36c5e..cc647992f3d1 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -211,6 +211,7 @@ enum {
enum {
MLX5_PFAULT_SUBTYPE_WQE = 0,
MLX5_PFAULT_SUBTYPE_RDMA = 1,
+ MLX5_PFAULT_SUBTYPE_MEMORY = 2,
};
enum wqe_page_fault_type {
@@ -370,6 +371,7 @@ enum mlx5_driver_event {
MLX5_DRIVER_EVENT_SF_PEER_DEVLINK,
MLX5_DRIVER_EVENT_AFFILIATION_DONE,
MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
+ MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE,
};
enum {
@@ -646,10 +648,11 @@ struct mlx5_eqe_page_req {
__be32 rsvd1[5];
};
+#define MEMORY_SCHEME_PAGE_FAULT_GRANULARITY 4096
struct mlx5_eqe_page_fault {
- __be32 bytes_committed;
union {
struct {
+ __be32 bytes_committed;
u16 reserved1;
__be16 wqe_index;
u16 reserved2;
@@ -659,6 +662,7 @@ struct mlx5_eqe_page_fault {
__be32 pftype_wq;
} __packed wqe;
struct {
+ __be32 bytes_committed;
__be32 r_key;
u16 reserved1;
__be16 packet_length;
@@ -666,6 +670,23 @@ struct mlx5_eqe_page_fault {
__be64 rdma_va;
__be32 pftype_token;
} __packed rdma;
+ struct {
+ u8 flags;
+ u8 reserved1;
+ __be16 post_demand_fault_pages;
+ __be16 pre_demand_fault_pages;
+ __be16 token47_32;
+ __be32 token31_0;
+ /*
+ * FW changed from specifying the fault size in byte
+ * count to 4k pages granularity. The size specified
+ * in pages uses bits 31:12, to keep backward
+ * compatibility.
+ */
+ __be32 demand_fault_pages;
+ __be32 mkey;
+ __be64 va;
+ } __packed memory;
} __packed;
} __packed;
@@ -1370,6 +1391,14 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_ODP(mdev, cap)\
MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)
+#define MLX5_CAP_ODP_SCHEME(mdev, cap) \
+ (MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, \
+ mem_page_fault) ? \
+ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, \
+ memory_page_fault_scheme_cap.cap) : \
+ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, \
+ transport_page_fault_scheme_cap.cap))
+
#define MLX5_CAP_ODP_MAX(mdev, cap)\
MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->max, cap)
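
Per the comment on demand_fault_pages, the memory-scheme EQE now reports the fault size as a 4 KiB page count in bits 31:12 of that field. One plausible decode, not the driver's actual handler:

	static size_t memory_fault_bytes(const struct mlx5_eqe_page_fault *pf)
	{
		u32 raw = be32_to_cpu(pf->memory.demand_fault_pages);

		/* bits 31:12 carry the size in 4 KiB pages */
		return (size_t)(raw >> 12) * MEMORY_SCHEME_PAGE_FAULT_GRANULARITY;
	}
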
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 9f42834f57c5..e23c692a34c7 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -645,6 +645,7 @@ struct mlx5_priv {
struct mlx5_sf_hw_table *sf_hw_table;
struct mlx5_sf_table *sf_table;
#endif
+ struct blocking_notifier_head lag_nh;
};
enum mlx5_device_state {
@@ -1183,7 +1184,6 @@ bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev);
bool mlx5_lag_is_master(struct mlx5_core_dev *dev);
bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev);
bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev);
-struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
struct net_device *slave);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 620a5c305123..96d369112bfa 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -316,6 +316,7 @@ enum {
MLX5_CMD_OP_SYNC_CRYPTO = 0xb12,
MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS = 0xb16,
MLX5_CMD_OP_GENERATE_WQE = 0xb17,
+ MLX5_CMD_OPCODE_QUERY_VUID = 0xb22,
MLX5_CMD_OP_MAX
};
@@ -1412,11 +1413,13 @@ struct mlx5_ifc_atomic_caps_bits {
u8 reserved_at_e0[0x720];
};
-struct mlx5_ifc_odp_cap_bits {
+struct mlx5_ifc_odp_scheme_cap_bits {
u8 reserved_at_0[0x40];
u8 sig[0x1];
- u8 reserved_at_41[0x1f];
+ u8 reserved_at_41[0x4];
+ u8 page_prefetch[0x1];
+ u8 reserved_at_46[0x1a];
u8 reserved_at_60[0x20];
@@ -1430,7 +1433,20 @@ struct mlx5_ifc_odp_cap_bits {
struct mlx5_ifc_odp_per_transport_service_cap_bits dc_odp_caps;
- u8 reserved_at_120[0x6E0];
+ u8 reserved_at_120[0xe0];
+};
+
+struct mlx5_ifc_odp_cap_bits {
+ struct mlx5_ifc_odp_scheme_cap_bits transport_page_fault_scheme_cap;
+
+ struct mlx5_ifc_odp_scheme_cap_bits memory_page_fault_scheme_cap;
+
+ u8 reserved_at_400[0x200];
+
+ u8 mem_page_fault[0x1];
+ u8 reserved_at_601[0x1f];
+
+ u8 reserved_at_620[0x1e0];
};
struct mlx5_ifc_tls_cap_bits {
@@ -1978,7 +1994,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_5a0[0x10];
u8 enhanced_cqe_compression[0x1];
- u8 reserved_at_5b1[0x2];
+ u8 reserved_at_5b1[0x1];
+ u8 crossing_vhca_mkey[0x1];
u8 log_max_dek[0x5];
u8 reserved_at_5b8[0x4];
u8 mini_cqe_resp_stride_index[0x1];
@@ -2047,7 +2064,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 dynamic_msix_table_size[0xc];
u8 reserved_at_740[0xc];
u8 min_dynamic_vf_msix_table_size[0x4];
- u8 reserved_at_750[0x4];
+ u8 reserved_at_750[0x2];
+ u8 data_direct[0x1];
+ u8 reserved_at_753[0x1];
u8 max_dynamic_vf_msix_table_size[0xc];
u8 reserved_at_760[0x3];
@@ -2075,7 +2094,11 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_0[0x80];
u8 migratable[0x1];
- u8 reserved_at_81[0x1f];
+ u8 reserved_at_81[0x11];
+ u8 query_vuid[0x1];
+ u8 reserved_at_93[0x5];
+ u8 umr_log_entity_size_5[0x1];
+ u8 reserved_at_99[0x7];
u8 max_reformat_insert_size[0x8];
u8 max_reformat_insert_offset[0x8];
@@ -2115,7 +2138,7 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 ts_cqe_metadata_size2wqe_counter[0x5];
u8 reserved_at_250[0x10];
- u8 reserved_at_260[0x120];
+ u8 reserved_at_260[0x20];
u8 format_select_dw_gtpu_dw_0[0x8];
u8 format_select_dw_gtpu_dw_1[0x8];
@@ -2130,7 +2153,8 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 min_mkey_log_entity_size_fixed_buffer[0x5];
u8 ec_vf_vport_base[0x10];
- u8 reserved_at_3a0[0x10];
+ u8 reserved_at_3a0[0xa];
+ u8 max_mkey_log_entity_size_mtt[0x6];
u8 max_rqt_vhca_id[0x10];
u8 reserved_at_3c0[0x20];
@@ -4267,6 +4291,7 @@ enum {
MLX5_MKC_ACCESS_MODE_KSM = 0x3,
MLX5_MKC_ACCESS_MODE_SW_ICM = 0x4,
MLX5_MKC_ACCESS_MODE_MEMIC = 0x5,
+ MLX5_MKC_ACCESS_MODE_CROSSING = 0x6,
};
struct mlx5_ifc_mkc_bits {
@@ -4309,14 +4334,16 @@ struct mlx5_ifc_mkc_bits {
u8 bsf_octword_size[0x20];
- u8 reserved_at_120[0x80];
+ u8 reserved_at_120[0x60];
+
+ u8 crossing_target_vhca_id[0x10];
+ u8 reserved_at_190[0x10];
u8 translations_octword_size[0x20];
u8 reserved_at_1c0[0x19];
u8 relaxed_ordering_read[0x1];
- u8 reserved_at_1d9[0x1];
- u8 log_page_size[0x5];
+ u8 log_page_size[0x6];
u8 reserved_at_1e0[0x20];
};
@@ -5253,6 +5280,36 @@ struct mlx5_ifc_query_vport_state_out_bits {
u8 state[0x4];
};
+struct mlx5_ifc_array1024_auto_bits {
+ u8 array1024_auto[32][0x20];
+};
+
+struct mlx5_ifc_query_vuid_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x40];
+
+ u8 query_vfs_vuid[0x1];
+ u8 data_direct[0x1];
+ u8 reserved_at_62[0xe];
+ u8 vhca_id[0x10];
+};
+
+struct mlx5_ifc_query_vuid_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x1a0];
+
+ u8 reserved_at_1e0[0x10];
+ u8 num_of_entries[0x10];
+
+ struct mlx5_ifc_array1024_auto_bits vuid[];
+};
+
enum {
MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0,
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1,
@@ -7357,6 +7414,30 @@ struct mlx5_ifc_qp_2err_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_trans_page_fault_info_bits {
+ u8 error[0x1];
+ u8 reserved_at_1[0x4];
+ u8 page_fault_type[0x3];
+ u8 wq_number[0x18];
+
+ u8 reserved_at_20[0x8];
+ u8 fault_token[0x18];
+};
+
+struct mlx5_ifc_mem_page_fault_info_bits {
+ u8 error[0x1];
+ u8 reserved_at_1[0xf];
+ u8 fault_token_47_32[0x10];
+
+ u8 fault_token_31_0[0x20];
+};
+
+union mlx5_ifc_page_fault_resume_in_page_fault_info_auto_bits {
+ struct mlx5_ifc_trans_page_fault_info_bits trans_page_fault_info;
+ struct mlx5_ifc_mem_page_fault_info_bits mem_page_fault_info;
+ u8 reserved_at_0[0x40];
+};
+
struct mlx5_ifc_page_fault_resume_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -7373,13 +7454,8 @@ struct mlx5_ifc_page_fault_resume_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 error[0x1];
- u8 reserved_at_41[0x4];
- u8 page_fault_type[0x3];
- u8 wq_number[0x18];
-
- u8 reserved_at_60[0x8];
- u8 token[0x18];
+ union mlx5_ifc_page_fault_resume_in_page_fault_info_auto_bits
+ page_fault_info;
};
struct mlx5_ifc_nop_out_bits {
@@ -9131,7 +9207,8 @@ struct mlx5_ifc_create_mkey_in_bits {
u8 pg_access[0x1];
u8 mkey_umem_valid[0x1];
- u8 reserved_at_62[0x1e];
+ u8 data_direct[0x1];
+ u8 reserved_at_63[0x1d];
struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
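
The query_vuid layouts are meant for the usual MLX5_SET()/MLX5_GET() accessors. A sketch of building the command input, with my_vhca_id supplied by the caller; the output side is elided because the reply carries a variable number of 1024-bit VUID entries and must be sized for them:

	u32 in[MLX5_ST_SZ_DW(query_vuid_in)] = {};

	MLX5_SET(query_vuid_in, in, opcode, MLX5_CMD_OPCODE_QUERY_VUID);
	MLX5_SET(query_vuid_in, in, vhca_id, my_vhca_id);
	MLX5_SET(query_vuid_in, in, data_direct, 1);
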
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 147073601716..ecf63d2b0582 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -98,7 +98,11 @@ extern int mmap_rnd_compat_bits __read_mostly;
#endif
#ifndef PHYSMEM_END
+# ifdef MAX_PHYSMEM_BITS
# define PHYSMEM_END ((1ULL << MAX_PHYSMEM_BITS) - 1)
+# else
+# define PHYSMEM_END (((phys_addr_t)-1)&~(1ULL<<63))
+# endif
#endif
#include <asm/page.h>
@@ -334,12 +338,16 @@ extern unsigned int kobjsize(const void *objp);
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
#ifdef CONFIG_ARCH_HAS_PKEYS
-# define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0
-# define VM_PKEY_BIT0 VM_HIGH_ARCH_0 /* A protection key is a 4-bit value */
-# define VM_PKEY_BIT1 VM_HIGH_ARCH_1 /* on x86 and 5-bit value on ppc64 */
-# define VM_PKEY_BIT2 VM_HIGH_ARCH_2
-# define VM_PKEY_BIT3 VM_HIGH_ARCH_3
-#ifdef CONFIG_PPC
+# define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0
+# define VM_PKEY_BIT0 VM_HIGH_ARCH_0
+# define VM_PKEY_BIT1 VM_HIGH_ARCH_1
+# define VM_PKEY_BIT2 VM_HIGH_ARCH_2
+#if CONFIG_ARCH_PKEY_BITS > 3
+# define VM_PKEY_BIT3 VM_HIGH_ARCH_3
+#else
+# define VM_PKEY_BIT3 0
+#endif
+#if CONFIG_ARCH_PKEY_BITS > 4
# define VM_PKEY_BIT4 VM_HIGH_ARCH_4
#else
# define VM_PKEY_BIT4 0
@@ -363,7 +371,7 @@ extern unsigned int kobjsize(const void *objp);
#if defined(CONFIG_X86)
# define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */
-#elif defined(CONFIG_PPC)
+#elif defined(CONFIG_PPC64)
# define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP VM_ARCH_1
@@ -378,8 +386,8 @@ extern unsigned int kobjsize(const void *objp);
#endif
#if defined(CONFIG_ARM64_MTE)
-# define VM_MTE VM_HIGH_ARCH_0 /* Use Tagged memory for access control */
-# define VM_MTE_ALLOWED VM_HIGH_ARCH_1 /* Tagged memory permitted */
+# define VM_MTE VM_HIGH_ARCH_4 /* Use Tagged memory for access control */
+# define VM_MTE_ALLOWED VM_HIGH_ARCH_5 /* Tagged memory permitted */
#else
# define VM_MTE VM_NONE
# define VM_MTE_ALLOWED VM_NONE
@@ -413,6 +421,8 @@ extern unsigned int kobjsize(const void *objp);
#ifdef CONFIG_64BIT
#define VM_DROPPABLE_BIT 40
#define VM_DROPPABLE BIT(VM_DROPPABLE_BIT)
+#elif defined(CONFIG_PPC32)
+#define VM_DROPPABLE VM_ARCH_1
#else
#define VM_DROPPABLE VM_NONE
#endif
@@ -1009,27 +1019,6 @@ static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
return mas_prev(&vmi->mas, 0);
}
-static inline
-struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
-{
- return mas_prev_range(&vmi->mas, 0);
-}
-
-static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
-{
- return vmi->mas.index;
-}
-
-static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
-{
- return vmi->mas.last + 1;
-}
-static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
- unsigned long count)
-{
- return mas_expected_entries(&vmi->mas, count);
-}
-
static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
unsigned long start, unsigned long end, gfp_t gfp)
{
@@ -1253,8 +1242,7 @@ static inline int folio_mapcount(const struct folio *folio)
if (likely(!folio_test_large(folio))) {
mapcount = atomic_read(&folio->_mapcount) + 1;
- /* Handle page_has_type() pages */
- if (mapcount < PAGE_MAPCOUNT_RESERVE + 1)
+ if (page_mapcount_is_type(mapcount))
mapcount = 0;
return mapcount;
}
@@ -1601,6 +1589,7 @@ void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
bool make_dirty);
void unpin_user_pages(struct page **pages, unsigned long npages);
+void unpin_user_folio(struct folio *folio, unsigned long npages);
void unpin_folios(struct folio **folios, unsigned long nfolios);
static inline bool is_cow_mapping(vm_flags_t flags)
@@ -1749,6 +1738,8 @@ static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
__set_bit(pid_bit, &vma->numab_state->pids_active[1]);
}
}
+
+bool folio_use_access_time(struct folio *folio);
#else /* !CONFIG_NUMA_BALANCING */
static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
{
@@ -1802,6 +1793,10 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
{
}
+static inline bool folio_use_access_time(struct folio *folio)
+{
+ return false;
+}
#endif /* CONFIG_NUMA_BALANCING */
#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
@@ -2151,14 +2146,19 @@ static inline size_t folio_size(const struct folio *folio)
* MM ("mapped shared"), or if the folio is only mapped into a single MM
* ("mapped exclusively").
*
+ * For KSM folios, this function also returns "mapped shared" when a folio is
+ * mapped multiple times into the same MM, because the individual page mappings
+ * are independent.
+ *
* As precise information is not easily available for all folios, this function
* estimates the number of MMs ("sharers") that are currently mapping a folio
* using the number of times the first page of the folio is currently mapped
* into page tables.
*
- * For small anonymous folios (except KSM folios) and anonymous hugetlb folios,
- * the return value will be exactly correct, because they can only be mapped
- * at most once into an MM, and they cannot be partially mapped.
+ * For small anonymous folios and anonymous hugetlb folios, the return
+ * value will be exactly correct: non-KSM folios can only be mapped at most once
+ * into an MM, and they cannot be partially mapped. KSM folios are
+ * considered shared even if mapped multiple times into the same MM.
*
* For other folios, the result can be fuzzy:
* #. For partially-mappable large folios (THP), the return value can wrongly
@@ -2167,9 +2167,6 @@ static inline size_t folio_size(const struct folio *folio)
* #. For pagecache folios (including hugetlb), the return value can wrongly
* indicate "mapped shared" (false positive) when two VMAs in the same MM
* cover the same file range.
- * #. For (small) KSM folios, the return value can wrongly indicate "mapped
- * shared" (false positive), when the folio is mapped multiple times into
- * the same MM.
*
* Further, this function only considers current page table mappings that
* are tracked using the folio mapcount(s).
@@ -2203,26 +2200,10 @@ static inline bool folio_likely_mapped_shared(struct folio *folio)
return atomic_read(&folio->_mapcount) > 0;
}
-#ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
-static inline int arch_make_page_accessible(struct page *page)
-{
- return 0;
-}
-#endif
-
#ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
static inline int arch_make_folio_accessible(struct folio *folio)
{
- int ret;
- long i, nr = folio_nr_pages(folio);
-
- for (i = 0; i < nr; i++) {
- ret = arch_make_page_accessible(folio_page(folio, i));
- if (ret)
- break;
- }
-
- return ret;
+ return 0;
}
#endif
@@ -2402,11 +2383,40 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor, unsigned long ceiling);
int
copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
-int follow_pte(struct vm_area_struct *vma, unsigned long address,
- pte_t **ptepp, spinlock_t **ptlp);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
+struct follow_pfnmap_args {
+ /**
+ * Inputs:
+ * @vma: Pointer to @vm_area_struct struct
+ * @address: the virtual address to walk
+ */
+ struct vm_area_struct *vma;
+ unsigned long address;
+ /**
+ * Internals:
+ *
+ * The caller shouldn't touch any of these.
+ */
+ spinlock_t *lock;
+ pte_t *ptep;
+ /**
+ * Outputs:
+ *
+ * @pfn: the PFN of the address
+ * @pgprot: the pgprot_t of the mapping
+ * @writable: whether the mapping is writable
+ * @special: whether the mapping is a special mapping (real PFN maps)
+ */
+ unsigned long pfn;
+ pgprot_t pgprot;
+ bool writable;
+ bool special;
+};
+int follow_pfnmap_start(struct follow_pfnmap_args *args);
+void follow_pfnmap_end(struct follow_pfnmap_args *args);
+
extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
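
With follow_pte() gone, callers use the start/end pair around struct follow_pfnmap_args. A minimal sketch of a caller (dump_pfn() is hypothetical), assuming the appropriate mm locks are already held and the VMA is a PFN mapping:

	static int dump_pfn(struct vm_area_struct *vma, unsigned long addr)
	{
		struct follow_pfnmap_args args = { .vma = vma, .address = addr };
		int ret;

		ret = follow_pfnmap_start(&args);
		if (ret)
			return ret;
		pr_info("addr %#lx -> pfn %#lx%s\n", addr, args.pfn,
			args.writable ? " (writable)" : "");
		follow_pfnmap_end(&args);
		return 0;
	}
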
@@ -2534,11 +2544,6 @@ int set_page_dirty_lock(struct page *page);
int get_cmdline(struct task_struct *task, char *buffer, int buflen);
-extern unsigned long move_page_tables(struct vm_area_struct *vma,
- unsigned long old_addr, struct vm_area_struct *new_vma,
- unsigned long new_addr, unsigned long len,
- bool need_rmap_locks, bool for_stack);
-
/*
* Flags used by change_protection(). For now we make it a bitmap so
* that we can pass in multiple flags just like parameters. However
@@ -2559,21 +2564,6 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma,
#define MM_CP_UFFD_WP_ALL (MM_CP_UFFD_WP | \
MM_CP_UFFD_WP_RESOLVE)
-bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
-bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
-static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
-{
- /*
- * We want to check manually if we can change individual PTEs writable
- * if we can't do that automatically for all PTEs in a mapping. For
- * private mappings, that's always the case when we have write
- * permissions as we properly have to handle COW.
- */
- if (vma->vm_flags & VM_SHARED)
- return vma_wants_writenotify(vma, vma->vm_page_prot);
- return !!(vma->vm_flags & VM_WRITE);
-
-}
bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
extern long change_protection(struct mmu_gather *tlb,
@@ -2697,6 +2687,30 @@ static inline pte_t pte_mkspecial(pte_t pte)
}
#endif
+#ifndef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+static inline bool pmd_special(pmd_t pmd)
+{
+ return false;
+}
+
+static inline pmd_t pmd_mkspecial(pmd_t pmd)
+{
+ return pmd;
+}
+#endif /* CONFIG_ARCH_SUPPORTS_PMD_PFNMAP */
+
+#ifndef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
+static inline bool pud_special(pud_t pud)
+{
+ return false;
+}
+
+static inline pud_t pud_mkspecial(pud_t pud)
+{
+ return pud;
+}
+#endif /* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */
+
#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pte_devmap(pte_t pte)
{
@@ -2889,7 +2903,7 @@ static inline void pagetable_free(struct ptdesc *pt)
__free_pages(page, compound_order(page));
}
-#if USE_SPLIT_PTE_PTLOCKS
+#if defined(CONFIG_SPLIT_PTE_PTLOCKS)
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
bool ptlock_alloc(struct ptdesc *ptdesc);
@@ -2947,7 +2961,7 @@ static inline bool ptlock_init(struct ptdesc *ptdesc)
return true;
}
-#else /* !USE_SPLIT_PTE_PTLOCKS */
+#else /* !defined(CONFIG_SPLIT_PTE_PTLOCKS) */
/*
* We use mm->page_table_lock to guard all pagetable pages of the mm.
*/
@@ -2962,7 +2976,7 @@ static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; }
static inline void ptlock_free(struct ptdesc *ptdesc) {}
-#endif /* USE_SPLIT_PTE_PTLOCKS */
+#endif /* defined(CONFIG_SPLIT_PTE_PTLOCKS) */
static inline bool pagetable_pte_ctor(struct ptdesc *ptdesc)
{
@@ -3022,7 +3036,7 @@ pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
NULL: pte_offset_kernel(pmd, address))
-#if USE_SPLIT_PMD_PTLOCKS
+#if defined(CONFIG_SPLIT_PMD_PTLOCKS)
static inline struct page *pmd_pgtable_page(pmd_t *pmd)
{
@@ -3281,78 +3295,9 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
-extern int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long start, unsigned long end, pgoff_t pgoff,
- struct vm_area_struct *next);
-extern int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long start, unsigned long end, pgoff_t pgoff);
-extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
-extern void unlink_file_vma(struct vm_area_struct *);
-extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
- unsigned long addr, unsigned long len, pgoff_t pgoff,
- bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);
-struct vm_area_struct *vma_modify(struct vma_iterator *vmi,
- struct vm_area_struct *prev,
- struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- unsigned long vm_flags,
- struct mempolicy *policy,
- struct vm_userfaultfd_ctx uffd_ctx,
- struct anon_vma_name *anon_name);
-
-/* We are about to modify the VMA's flags. */
-static inline struct vm_area_struct
-*vma_modify_flags(struct vma_iterator *vmi,
- struct vm_area_struct *prev,
- struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- unsigned long new_flags)
-{
- return vma_modify(vmi, prev, vma, start, end, new_flags,
- vma_policy(vma), vma->vm_userfaultfd_ctx,
- anon_vma_name(vma));
-}
-
-/* We are about to modify the VMA's flags and/or anon_name. */
-static inline struct vm_area_struct
-*vma_modify_flags_name(struct vma_iterator *vmi,
- struct vm_area_struct *prev,
- struct vm_area_struct *vma,
- unsigned long start,
- unsigned long end,
- unsigned long new_flags,
- struct anon_vma_name *new_name)
-{
- return vma_modify(vmi, prev, vma, start, end, new_flags,
- vma_policy(vma), vma->vm_userfaultfd_ctx, new_name);
-}
-
-/* We are about to modify the VMA's memory policy. */
-static inline struct vm_area_struct
-*vma_modify_policy(struct vma_iterator *vmi,
- struct vm_area_struct *prev,
- struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- struct mempolicy *new_pol)
-{
- return vma_modify(vmi, prev, vma, start, end, vma->vm_flags,
- new_pol, vma->vm_userfaultfd_ctx, anon_vma_name(vma));
-}
-
-/* We are about to modify the VMA's flags and/or uffd context. */
-static inline struct vm_area_struct
-*vma_modify_flags_uffd(struct vma_iterator *vmi,
- struct vm_area_struct *prev,
- struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- unsigned long new_flags,
- struct vm_userfaultfd_ctx new_ctx)
-{
- return vma_modify(vmi, prev, vma, start, end, new_flags,
- vma_policy(vma), new_ctx, anon_vma_name(vma));
-}
+int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift);
static inline int check_data_rlimit(unsigned long rlim,
unsigned long new,
@@ -3385,10 +3330,6 @@ extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long flags,
const struct vm_special_mapping *spec);
-/* This is an obsolete alternative to _install_special_mapping. */
-extern int install_special_mapping(struct mm_struct *mm,
- unsigned long addr, unsigned long len,
- unsigned long flags, struct page **pages);
unsigned long randomize_stack_top(unsigned long stack_top);
unsigned long randomize_page(unsigned long start, unsigned long range);
@@ -3414,14 +3355,14 @@ extern unsigned long do_mmap(struct file *file, unsigned long addr,
extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
unsigned long start, size_t len, struct list_head *uf,
bool unlock);
+int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ struct mm_struct *mm, unsigned long start,
+ unsigned long end, struct list_head *uf, bool unlock);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf);
extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
#ifdef CONFIG_MMU
-extern int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- struct list_head *uf, bool unlock);
extern int __mm_populate(unsigned long addr, unsigned long len,
int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
@@ -3649,9 +3590,6 @@ static inline vm_fault_t vmf_fs_error(int err)
return VM_FAULT_SIGBUS;
}
-struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
- unsigned int foll_flags);
-
static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
{
if (vm_fault & VM_FAULT_OOM)
@@ -4187,18 +4125,18 @@ madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
#ifdef CONFIG_UNACCEPTED_MEMORY
-bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end);
-void accept_memory(phys_addr_t start, phys_addr_t end);
+bool range_contains_unaccepted_memory(phys_addr_t start, unsigned long size);
+void accept_memory(phys_addr_t start, unsigned long size);
#else
static inline bool range_contains_unaccepted_memory(phys_addr_t start,
- phys_addr_t end)
+ unsigned long size)
{
return false;
}
-static inline void accept_memory(phys_addr_t start, phys_addr_t end)
+static inline void accept_memory(phys_addr_t start, unsigned long size)
{
}
@@ -4206,9 +4144,7 @@ static inline void accept_memory(phys_addr_t start, phys_addr_t end)
static inline bool pfn_is_unaccepted_memory(unsigned long pfn)
{
- phys_addr_t paddr = pfn << PAGE_SHIFT;
-
- return range_contains_unaccepted_memory(paddr, paddr + PAGE_SIZE);
+ return range_contains_unaccepted_memory(pfn << PAGE_SHIFT, PAGE_SIZE);
}
void vma_pgtable_walk_begin(struct vm_area_struct *vma);
@@ -4216,4 +4152,71 @@ void vma_pgtable_walk_end(struct vm_area_struct *vma);
int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size);
+#ifdef CONFIG_64BIT
+int do_mseal(unsigned long start, size_t len_in, unsigned long flags);
+#else
+static inline int do_mseal(unsigned long start, size_t len_in, unsigned long flags)
+{
+ /* noop on 32 bit */
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
+{
+ int i;
+ struct alloc_tag *tag;
+ unsigned int nr_pages = 1 << new_order;
+
+ if (!mem_alloc_profiling_enabled())
+ return;
+
+ tag = pgalloc_tag_get(&folio->page);
+ if (!tag)
+ return;
+
+ for (i = nr_pages; i < (1 << old_order); i += nr_pages) {
+ union codetag_ref *ref = get_page_tag_ref(folio_page(folio, i));
+
+ if (ref) {
+ /* Set new reference to point to the original tag */
+ alloc_tag_ref_set(ref, tag);
+ put_page_tag_ref(ref);
+ }
+ }
+}
+
+static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
+{
+ struct alloc_tag *tag;
+ union codetag_ref *ref;
+
+ tag = pgalloc_tag_get(&old->page);
+ if (!tag)
+ return;
+
+ ref = get_page_tag_ref(&new->page);
+ if (!ref)
+ return;
+
+ /* Clear the old ref to the original allocation tag. */
+ clear_page_tag_ref(&old->page);
+ /* Decrement the counters of the tag on get_new_folio. */
+ alloc_tag_sub(ref, folio_nr_pages(new));
+
+ __alloc_tag_ref_set(ref, tag);
+
+ put_page_tag_ref(ref);
+}
+#else /* !CONFIG_MEM_ALLOC_PROFILING */
+static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
+{
+}
+
+static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
+{
+}
+#endif /* CONFIG_MEM_ALLOC_PROFILING */
+
#endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 485424979254..6e3bdf8e38bc 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -109,7 +109,7 @@ struct page {
/**
* @private: Mapping-private opaque data.
* Usually used for buffer_heads if PagePrivate.
- * Used for swp_entry_t if PageSwapCache.
+ * Used for swp_entry_t if swapcache flag set.
* Indicates order in the buddy system if PageBuddy.
*/
unsigned long private;
@@ -660,6 +660,9 @@ struct vma_numab_state {
* per VM-area/task. A VM area is any part of the process virtual memory
* space that has a special rule for the page-fault handlers (ie a shared
* library, the executable area etc).
+ *
+ * Only explicitly marked struct members may be accessed by RCU readers before
+ * getting a stable reference.
*/
struct vm_area_struct {
/* The first cache line has the info for VMA tree walking. */
@@ -675,7 +678,11 @@ struct vm_area_struct {
#endif
};
- struct mm_struct *vm_mm; /* The address space we belong to. */
+ /*
+ * The address space we belong to.
+ * Unstable RCU readers are allowed to read this.
+ */
+ struct mm_struct *vm_mm;
pgprot_t vm_page_prot; /* Access permissions of this VMA. */
/*
@@ -688,7 +695,10 @@ struct vm_area_struct {
};
#ifdef CONFIG_PER_VMA_LOCK
- /* Flag to indicate areas detached from the mm->mm_mt tree */
+ /*
+ * Flag to indicate areas detached from the mm->mm_mt tree.
+ * Unstable RCU readers are allowed to read this.
+ */
bool detached;
/*
@@ -706,6 +716,7 @@ struct vm_area_struct {
* slowpath.
*/
int vm_lock_seq;
+ /* Unstable RCU readers are allowed to read this. */
struct vma_lock *vm_lock;
#endif
@@ -947,7 +958,7 @@ struct mm_struct {
#ifdef CONFIG_MMU_NOTIFIER
struct mmu_notifier_subscriptions *notifier_subscriptions;
#endif
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !defined(CONFIG_SPLIT_PMD_PTLOCKS)
pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_NUMA_BALANCING
@@ -1313,6 +1324,9 @@ struct vm_special_mapping {
int (*mremap)(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma);
+
+ void (*close)(const struct vm_special_mapping *sm,
+ struct vm_area_struct *vma);
};
enum tlb_flush_reason {
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index a2f6179b672b..bff5706b76e1 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -16,9 +16,6 @@
#include <asm/tlbbatch.h>
#endif
-#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
-#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \
- IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
/*
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 2c7928a50907..f0ac2e469b32 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -11,18 +11,6 @@
struct mmc_data;
struct mmc_request;
-enum mmc_blk_status {
- MMC_BLK_SUCCESS = 0,
- MMC_BLK_PARTIAL,
- MMC_BLK_CMD_ERR,
- MMC_BLK_RETRY,
- MMC_BLK_ABORT,
- MMC_BLK_DATA_ERR,
- MMC_BLK_ECC_ERR,
- MMC_BLK_NOMEDIUM,
- MMC_BLK_NEW_REQUEST,
-};
-
struct mmc_command {
u32 opcode;
u32 arg;
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 88c6a76042ee..8fc2b328ec4d 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -10,6 +10,7 @@
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/fault-inject.h>
+#include <linux/debugfs.h>
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
@@ -264,16 +265,6 @@ struct mmc_cqe_ops {
void (*cqe_recovery_finish)(struct mmc_host *host);
};
-struct mmc_async_req {
- /* active mmc request */
- struct mmc_request *mrq;
- /*
- * Check error status of completed mmc request.
- * Returns 0 if success otherwise non zero.
- */
- enum mmc_blk_status (*err_check)(struct mmc_card *, struct mmc_async_req *);
-};
-
/**
* struct mmc_slot - MMC slot functions
*
@@ -291,20 +282,6 @@ struct mmc_slot {
void *handler_priv;
};
-/**
- * mmc_context_info - synchronization details for mmc context
- * @is_done_rcv wake up reason was done request
- * @is_new_req wake up reason was new request
- * @is_waiting_last_req mmc context waiting for single running request
- * @wait wait queue
- */
-struct mmc_context_info {
- bool is_done_rcv;
- bool is_new_req;
- bool is_waiting_last_req;
- wait_queue_head_t wait;
-};
-
struct regulator;
struct mmc_pwrseq;
@@ -672,7 +649,8 @@ static inline void mmc_debugfs_err_stats_inc(struct mmc_host *host,
host->err_stats[stat] += 1;
}
-int mmc_sd_switch(struct mmc_card *card, int mode, int group, u8 value, u8 *resp);
+int mmc_sd_switch(struct mmc_card *card, bool mode, int group,
+ u8 value, u8 *resp);
int mmc_send_status(struct mmc_card *card, u32 *status);
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 1dc6248feb83..17506e4a2835 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -666,11 +666,6 @@ enum zone_watermarks {
#define NR_LOWORDER_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1))
#define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP)
-#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
-#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
-#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
-#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
-
/*
* Flags used in pcp->flags field.
*
@@ -1016,6 +1011,32 @@ enum zone_flags {
ZONE_BELOW_HIGH, /* zone is below high watermark. */
};
+static inline unsigned long wmark_pages(const struct zone *z,
+ enum zone_watermarks w)
+{
+ return z->_watermark[w] + z->watermark_boost;
+}
+
+static inline unsigned long min_wmark_pages(const struct zone *z)
+{
+ return wmark_pages(z, WMARK_MIN);
+}
+
+static inline unsigned long low_wmark_pages(const struct zone *z)
+{
+ return wmark_pages(z, WMARK_LOW);
+}
+
+static inline unsigned long high_wmark_pages(const struct zone *z)
+{
+ return wmark_pages(z, WMARK_HIGH);
+}
+
+static inline unsigned long promo_wmark_pages(const struct zone *z)
+{
+ return wmark_pages(z, WMARK_PROMO);
+}
+
static inline unsigned long zone_managed_pages(struct zone *zone)
{
return (unsigned long)atomic_long_read(&zone->managed_pages);
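
The watermark macros become typed inline helpers, so call sites keep the same shape. An illustrative check, not lifted from a particular caller:

	unsigned long free = zone_page_state(zone, NR_FREE_PAGES);

	if (free <= min_wmark_pages(zone))
		need_reclaim = true;		/* illustrative flag */
	else if (free > promo_wmark_pages(zone))
		allow_promotion = true;		/* illustrative flag */
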
@@ -1688,7 +1709,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
zone = zonelist_zone(z))
#define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
- for (zone = z->zone; \
+ for (zone = zonelist_zone(z); \
zone; \
z = next_zones_zonelist(++z, highidx, nodemask), \
zone = zonelist_zone(z))
@@ -1724,7 +1745,7 @@ static inline bool movable_only_nodes(nodemask_t *nodes)
nid = first_node(*nodes);
zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);
- return (!z->zone) ? true : false;
+ return (!zonelist_zone(z)) ? true : false;
}
diff --git a/include/linux/mnt_idmapping.h b/include/linux/mnt_idmapping.h
index cd4d5c8781f5..b1b219bc3422 100644
--- a/include/linux/mnt_idmapping.h
+++ b/include/linux/mnt_idmapping.h
@@ -9,6 +9,7 @@ struct mnt_idmap;
struct user_namespace;
extern struct mnt_idmap nop_mnt_idmap;
+extern struct mnt_idmap invalid_mnt_idmap;
extern struct user_namespace init_user_ns;
typedef struct {
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h
index 8f882f5881e8..70b366b64816 100644
--- a/include/linux/mnt_namespace.h
+++ b/include/linux/mnt_namespace.h
@@ -3,6 +3,9 @@
#define _NAMESPACE_H_
#ifdef __KERNEL__
+#include <linux/cleanup.h>
+#include <linux/err.h>
+
struct mnt_namespace;
struct fs_struct;
struct user_namespace;
@@ -11,6 +14,7 @@ struct ns_common;
extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *,
struct user_namespace *, struct fs_struct *);
extern void put_mnt_ns(struct mnt_namespace *ns);
+DEFINE_FREE(put_mnt_ns, struct mnt_namespace *, if (!IS_ERR_OR_NULL(_T)) put_mnt_ns(_T))
extern struct ns_common *from_mnt_ns(struct mnt_namespace *);
extern const struct file_operations proc_mounts_operations;
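
DEFINE_FREE(put_mnt_ns, ...) enables the cleanup.h scoped-release pattern for namespace pointers. A sketch, assuming flags, user_ns and fs are in scope in the surrounding function:

	struct mnt_namespace *ns __free(put_mnt_ns) = NULL;

	ns = copy_mnt_ns(flags, current->nsproxy->mnt_ns, user_ns, fs);
	if (IS_ERR(ns))
		return PTR_ERR(ns);	/* the guard skips ERR_PTR/NULL values */
	/* use ns; put_mnt_ns() runs automatically when it goes out of scope */
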
diff --git a/include/linux/mpi.h b/include/linux/mpi.h
index eb0d1c1db208..47be46f36435 100644
--- a/include/linux/mpi.h
+++ b/include/linux/mpi.h
@@ -40,79 +40,26 @@ struct gcry_mpi {
typedef struct gcry_mpi *MPI;
#define mpi_get_nlimbs(a) ((a)->nlimbs)
-#define mpi_has_sign(a) ((a)->sign)
/*-- mpiutil.c --*/
MPI mpi_alloc(unsigned nlimbs);
-void mpi_clear(MPI a);
void mpi_free(MPI a);
int mpi_resize(MPI a, unsigned nlimbs);
-static inline MPI mpi_new(unsigned int nbits)
-{
- return mpi_alloc((nbits + BITS_PER_MPI_LIMB - 1) / BITS_PER_MPI_LIMB);
-}
-
MPI mpi_copy(MPI a);
-MPI mpi_alloc_like(MPI a);
-void mpi_snatch(MPI w, MPI u);
-MPI mpi_set(MPI w, MPI u);
-MPI mpi_set_ui(MPI w, unsigned long u);
-MPI mpi_alloc_set_ui(unsigned long u);
-void mpi_swap_cond(MPI a, MPI b, unsigned long swap);
-
-/* Constants used to return constant MPIs. See mpi_init if you
- * want to add more constants.
- */
-#define MPI_NUMBER_OF_CONSTANTS 6
-enum gcry_mpi_constants {
- MPI_C_ZERO,
- MPI_C_ONE,
- MPI_C_TWO,
- MPI_C_THREE,
- MPI_C_FOUR,
- MPI_C_EIGHT
-};
-
-MPI mpi_const(enum gcry_mpi_constants no);
/*-- mpicoder.c --*/
-
-/* Different formats of external big integer representation. */
-enum gcry_mpi_format {
- GCRYMPI_FMT_NONE = 0,
- GCRYMPI_FMT_STD = 1, /* Twos complement stored without length. */
- GCRYMPI_FMT_PGP = 2, /* As used by OpenPGP (unsigned only). */
- GCRYMPI_FMT_SSH = 3, /* As used by SSH (like STD but with length). */
- GCRYMPI_FMT_HEX = 4, /* Hex format. */
- GCRYMPI_FMT_USG = 5, /* Like STD but unsigned. */
- GCRYMPI_FMT_OPAQUE = 8 /* Opaque format (some functions only). */
-};
-
MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes);
MPI mpi_read_from_buffer(const void *buffer, unsigned *ret_nread);
-int mpi_fromstr(MPI val, const char *str);
-MPI mpi_scanval(const char *string);
MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len);
void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign);
int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
int *sign);
int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned nbytes,
int *sign);
-int mpi_print(enum gcry_mpi_format format, unsigned char *buffer,
- size_t buflen, size_t *nwritten, MPI a);
/*-- mpi-mod.c --*/
-void mpi_mod(MPI rem, MPI dividend, MPI divisor);
-
-/* Context used with Barrett reduction. */
-struct barrett_ctx_s;
-typedef struct barrett_ctx_s *mpi_barrett_t;
-
-mpi_barrett_t mpi_barrett_init(MPI m, int copy);
-void mpi_barrett_free(mpi_barrett_t ctx);
-void mpi_mod_barrett(MPI r, MPI x, mpi_barrett_t ctx);
-void mpi_mul_barrett(MPI w, MPI u, MPI v, mpi_barrett_t ctx);
+int mpi_mod(MPI rem, MPI dividend, MPI divisor);
/*-- mpi-pow.c --*/
int mpi_powm(MPI res, MPI base, MPI exp, MPI mod);
@@ -120,7 +67,6 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod);
/*-- mpi-cmp.c --*/
int mpi_cmp_ui(MPI u, ulong v);
int mpi_cmp(MPI u, MPI v);
-int mpi_cmpabs(MPI u, MPI v);
/*-- mpi-sub-ui.c --*/
int mpi_sub_ui(MPI w, MPI u, unsigned long vval);
@@ -129,138 +75,22 @@ int mpi_sub_ui(MPI w, MPI u, unsigned long vval);
void mpi_normalize(MPI a);
unsigned mpi_get_nbits(MPI a);
int mpi_test_bit(MPI a, unsigned int n);
-void mpi_set_bit(MPI a, unsigned int n);
-void mpi_set_highbit(MPI a, unsigned int n);
-void mpi_clear_highbit(MPI a, unsigned int n);
-void mpi_clear_bit(MPI a, unsigned int n);
-void mpi_rshift_limbs(MPI a, unsigned int count);
-void mpi_rshift(MPI x, MPI a, unsigned int n);
-void mpi_lshift_limbs(MPI a, unsigned int count);
-void mpi_lshift(MPI x, MPI a, unsigned int n);
+int mpi_set_bit(MPI a, unsigned int n);
+int mpi_rshift(MPI x, MPI a, unsigned int n);
/*-- mpi-add.c --*/
-void mpi_add_ui(MPI w, MPI u, unsigned long v);
-void mpi_add(MPI w, MPI u, MPI v);
-void mpi_sub(MPI w, MPI u, MPI v);
-void mpi_addm(MPI w, MPI u, MPI v, MPI m);
-void mpi_subm(MPI w, MPI u, MPI v, MPI m);
+int mpi_add(MPI w, MPI u, MPI v);
+int mpi_sub(MPI w, MPI u, MPI v);
+int mpi_addm(MPI w, MPI u, MPI v, MPI m);
+int mpi_subm(MPI w, MPI u, MPI v, MPI m);
/*-- mpi-mul.c --*/
-void mpi_mul(MPI w, MPI u, MPI v);
-void mpi_mulm(MPI w, MPI u, MPI v, MPI m);
+int mpi_mul(MPI w, MPI u, MPI v);
+int mpi_mulm(MPI w, MPI u, MPI v, MPI m);
/*-- mpi-div.c --*/
-void mpi_tdiv_r(MPI rem, MPI num, MPI den);
-void mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor);
-void mpi_fdiv_q(MPI quot, MPI dividend, MPI divisor);
-
-/*-- mpi-inv.c --*/
-int mpi_invm(MPI x, MPI a, MPI n);
-
-/*-- ec.c --*/
-
-/* Object to represent a point in projective coordinates */
-struct gcry_mpi_point {
- MPI x;
- MPI y;
- MPI z;
-};
-
-typedef struct gcry_mpi_point *MPI_POINT;
-
-/* Models describing an elliptic curve */
-enum gcry_mpi_ec_models {
- /* The Short Weierstrass equation is
- * y^2 = x^3 + ax + b
- */
- MPI_EC_WEIERSTRASS = 0,
- /* The Montgomery equation is
- * by^2 = x^3 + ax^2 + x
- */
- MPI_EC_MONTGOMERY,
- /* The Twisted Edwards equation is
- * ax^2 + y^2 = 1 + bx^2y^2
- * Note that we use 'b' instead of the commonly used 'd'.
- */
- MPI_EC_EDWARDS
-};
-
-/* Dialects used with elliptic curves */
-enum ecc_dialects {
- ECC_DIALECT_STANDARD = 0,
- ECC_DIALECT_ED25519,
- ECC_DIALECT_SAFECURVE
-};
-
-/* This context is used with all our EC functions. */
-struct mpi_ec_ctx {
- enum gcry_mpi_ec_models model; /* The model describing this curve. */
- enum ecc_dialects dialect; /* The ECC dialect used with the curve. */
- int flags; /* Public key flags (not always used). */
- unsigned int nbits; /* Number of bits. */
-
- /* Domain parameters. Note that they may not all be set and if set
- * the MPIs may be flagged as constant.
- */
- MPI p; /* Prime specifying the field GF(p). */
- MPI a; /* First coefficient of the Weierstrass equation. */
- MPI b; /* Second coefficient of the Weierstrass equation. */
- MPI_POINT G; /* Base point (generator). */
- MPI n; /* Order of G. */
- unsigned int h; /* Cofactor. */
-
- /* The actual key. May not be set. */
- MPI_POINT Q; /* Public key. */
- MPI d; /* Private key. */
-
- const char *name; /* Name of the curve. */
-
- /* This structure is private to mpi/ec.c! */
- struct {
- struct {
- unsigned int a_is_pminus3:1;
- unsigned int two_inv_p:1;
- } valid; /* Flags to help setting the helper vars below. */
-
- int a_is_pminus3; /* True if A = P - 3. */
-
- MPI two_inv_p;
-
- mpi_barrett_t p_barrett;
-
- /* Scratch variables. */
- MPI scratch[11];
-
- /* Helper for fast reduction. */
- /* int nist_nbits; /\* If this is a NIST curve, the # of bits. *\/ */
- /* MPI s[10]; */
- /* MPI c; */
- } t;
-
- /* Curve specific computation routines for the field. */
- void (*addm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
- void (*subm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ec);
- void (*mulm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
- void (*pow2)(MPI w, const MPI b, struct mpi_ec_ctx *ctx);
- void (*mul2)(MPI w, MPI u, struct mpi_ec_ctx *ctx);
-};
-
-void mpi_ec_init(struct mpi_ec_ctx *ctx, enum gcry_mpi_ec_models model,
- enum ecc_dialects dialect,
- int flags, MPI p, MPI a, MPI b);
-void mpi_ec_deinit(struct mpi_ec_ctx *ctx);
-MPI_POINT mpi_point_new(unsigned int nbits);
-void mpi_point_release(MPI_POINT p);
-void mpi_point_init(MPI_POINT p);
-void mpi_point_free_parts(MPI_POINT p);
-int mpi_ec_get_affine(MPI x, MPI y, MPI_POINT point, struct mpi_ec_ctx *ctx);
-void mpi_ec_add_points(MPI_POINT result,
- MPI_POINT p1, MPI_POINT p2,
- struct mpi_ec_ctx *ctx);
-void mpi_ec_mul_point(MPI_POINT result,
- MPI scalar, MPI_POINT point,
- struct mpi_ec_ctx *ctx);
-int mpi_ec_curve_point(MPI_POINT point, struct mpi_ec_ctx *ctx);
+int mpi_tdiv_r(MPI rem, MPI num, MPI den);
+int mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor);
/* inline functions */
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 944979763825..b10093c4d00e 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -554,6 +554,8 @@ enum {
MSI_FLAG_MSIX_CONTIGUOUS = (1 << 19),
/* PCI/MSI-X vectors can be dynamically allocated/freed post MSI-X enable */
MSI_FLAG_PCI_MSIX_ALLOC_DYN = (1 << 20),
+ /* PCI MSIs cannot be steered separately to CPU cores */
+ MSI_FLAG_NO_AFFINITY = (1 << 21),
};
/**
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index b4fa92a6e44b..1b56796f6cb3 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/io.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <asm/barrier.h>
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index b2996dc987ff..1e4208040956 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -103,6 +103,8 @@ enum nand_page_io_req_type {
* @ooblen: the number of OOB bytes to read from/write to this page
* @oobbuf: buffer to store OOB data in or get OOB data from
* @mode: one of the %MTD_OPS_XXX mode
+ * @continuous: no need to start over the operation at the end of each page, the
+ * NAND device will automatically prepare the next one
*
* This object is used to pass per-page I/O requests to NAND sub-layers. This
* way all useful information are already formatted in a useful way and
@@ -125,6 +127,7 @@ struct nand_page_io_req {
void *in;
} oobbuf;
int mode;
+ bool continuous;
};
const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void);
@@ -906,19 +909,19 @@ static inline void nanddev_pos_next_page(struct nand_device *nand,
}
/**
- * nand_io_iter_init - Initialize a NAND I/O iterator
+ * nand_io_page_iter_init - Initialize a NAND I/O iterator
* @nand: NAND device
* @offs: absolute offset
* @req: MTD request
* @iter: NAND I/O iterator
*
* Initializes a NAND iterator based on the information passed by the MTD
- * layer.
+ * layer for page jumps.
*/
-static inline void nanddev_io_iter_init(struct nand_device *nand,
- enum nand_page_io_req_type reqtype,
- loff_t offs, struct mtd_oob_ops *req,
- struct nand_io_iter *iter)
+static inline void nanddev_io_page_iter_init(struct nand_device *nand,
+ enum nand_page_io_req_type reqtype,
+ loff_t offs, struct mtd_oob_ops *req,
+ struct nand_io_iter *iter)
{
struct mtd_info *mtd = nanddev_to_mtd(nand);
@@ -937,6 +940,43 @@ static inline void nanddev_io_iter_init(struct nand_device *nand,
iter->req.ooblen = min_t(unsigned int,
iter->oobbytes_per_page - iter->req.ooboffs,
iter->oobleft);
+ iter->req.continuous = false;
+}
+
+/**
+ * nand_io_block_iter_init - Initialize a NAND I/O iterator
+ * @nand: NAND device
+ * @offs: absolute offset
+ * @req: MTD request
+ * @iter: NAND I/O iterator
+ *
+ * Initializes a NAND iterator based on the information passed by the MTD
+ * layer for block jumps (no OOB)
+ *
+ * In practice only reads may leverage this iterator.
+ */
+static inline void nanddev_io_block_iter_init(struct nand_device *nand,
+ enum nand_page_io_req_type reqtype,
+ loff_t offs, struct mtd_oob_ops *req,
+ struct nand_io_iter *iter)
+{
+ unsigned int offs_in_eb;
+
+ iter->req.type = reqtype;
+ iter->req.mode = req->mode;
+ iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
+ iter->req.ooboffs = 0;
+ iter->oobbytes_per_page = 0;
+ iter->dataleft = req->len;
+ iter->oobleft = 0;
+ iter->req.databuf.in = req->datbuf;
+ offs_in_eb = (nand->memorg.pagesize * iter->req.pos.page) + iter->req.dataoffs;
+ iter->req.datalen = min_t(unsigned int,
+ nanddev_eraseblock_size(nand) - offs_in_eb,
+ iter->dataleft);
+ iter->req.oobbuf.in = NULL;
+ iter->req.ooblen = 0;
+ iter->req.continuous = true;
}
/**
@@ -963,6 +1003,25 @@ static inline void nanddev_io_iter_next_page(struct nand_device *nand,
}
/**
+ * nand_io_iter_next_block - Move to the next block
+ * @nand: NAND device
+ * @iter: NAND I/O iterator
+ *
+ * Updates the @iter to point to the next block.
+ * No OOB handling available.
+ */
+static inline void nanddev_io_iter_next_block(struct nand_device *nand,
+ struct nand_io_iter *iter)
+{
+ nanddev_pos_next_eraseblock(nand, &iter->req.pos);
+ iter->dataleft -= iter->req.datalen;
+ iter->req.databuf.in += iter->req.datalen;
+ iter->req.dataoffs = 0;
+ iter->req.datalen = min_t(unsigned int, nanddev_eraseblock_size(nand),
+ iter->dataleft);
+}
+
+/**
* nand_io_iter_end - Should end iteration or not
* @nand: NAND device
* @iter: NAND I/O iterator
@@ -990,13 +1049,28 @@ static inline bool nanddev_io_iter_end(struct nand_device *nand,
* @req: MTD I/O request
* @iter: NAND I/O iterator
*
- * Should be used for iterate over pages that are contained in an MTD request.
+ * Should be used for iterating over pages that are contained in an MTD request.
*/
#define nanddev_io_for_each_page(nand, type, start, req, iter) \
- for (nanddev_io_iter_init(nand, type, start, req, iter); \
+ for (nanddev_io_page_iter_init(nand, type, start, req, iter); \
!nanddev_io_iter_end(nand, iter); \
nanddev_io_iter_next_page(nand, iter))
+/**
+ * nand_io_for_each_block - Iterate over all NAND pages contained in an MTD I/O
+ * request, one block at a time
+ * @nand: NAND device
+ * @start: start address to read/write from
+ * @req: MTD I/O request
+ * @iter: NAND I/O iterator
+ *
+ * Should be used for iterating over blocks that are contained in an MTD request.
+ */
+#define nanddev_io_for_each_block(nand, type, start, req, iter) \
+ for (nanddev_io_block_iter_init(nand, type, start, req, iter); \
+ !nanddev_io_iter_end(nand, iter); \
+ nanddev_io_iter_next_block(nand, iter))
+
bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
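
The block-granularity iterator mirrors the existing per-page loop. A read-path sketch, with read_one_block() standing in for whatever the caller does with each request (hypothetical helper):

	struct nand_io_iter iter;
	int ret = 0;

	nanddev_io_for_each_block(nand, NAND_PAGE_READ, from, ops, &iter) {
		ret = read_one_block(nand, &iter.req);
		if (ret)
			break;
	}
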
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 5c19ead60499..702e5fb13dae 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -312,6 +312,8 @@ struct spinand_ecc_info {
#define SPINAND_HAS_QE_BIT BIT(0)
#define SPINAND_HAS_CR_FEAT_BIT BIT(1)
+#define SPINAND_HAS_PROG_PLANE_SELECT_BIT BIT(2)
+#define SPINAND_HAS_READ_PLANE_SELECT_BIT BIT(3)
/**
* struct spinand_ondie_ecc_conf - private SPI-NAND on-die ECC engine structure
@@ -336,6 +338,7 @@ struct spinand_ondie_ecc_conf {
* @op_variants.update_cache: variants of the update-cache operation
* @select_target: function used to select a target/die. Required only for
* multi-die chips
+ * @set_cont_read: enable/disable continuous cached reads
*
* Each SPI NAND manufacturer driver should have a spinand_info table
* describing all the chips supported by the driver.
@@ -354,6 +357,8 @@ struct spinand_info {
} op_variants;
int (*select_target)(struct spinand_device *spinand,
unsigned int target);
+ int (*set_cont_read)(struct spinand_device *spinand,
+ bool enable);
};
#define SPINAND_ID(__method, ...) \
@@ -379,6 +384,9 @@ struct spinand_info {
#define SPINAND_SELECT_TARGET(__func) \
.select_target = __func,
+#define SPINAND_CONT_READ(__set_cont_read) \
+ .set_cont_read = __set_cont_read,
+
#define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants, \
__flags, ...) \
{ \
@@ -422,6 +430,12 @@ struct spinand_dirmap {
* passed in spi_mem_op be DMA-able, so we can't based the bufs on
* the stack
* @manufacturer: SPI NAND manufacturer information
+ * @cont_read_possible: Field filled by the core once the whole system
+ * configuration is known to tell whether continuous reads are
+ * suitable to use or not in general with this chip/configuration.
+ * A per-transfer check must of course be done to ensure it is
+ * actually relevant to enable this feature.
+ * @set_cont_read: Enable/disable the continuous read feature
* @priv: manufacturer private data
*/
struct spinand_device {
@@ -451,6 +465,10 @@ struct spinand_device {
u8 *scratchbuf;
const struct spinand_manufacturer *manufacturer;
void *priv;
+
+ bool cont_read_possible;
+ int (*set_cont_read)(struct spinand_device *spinand,
+ bool enable);
};
/**
@@ -517,6 +535,7 @@ int spinand_match_and_init(struct spinand_device *spinand,
enum spinand_readid_method rdid_method);
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
+int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val);
int spinand_select_target(struct spinand_device *spinand, unsigned int target);
#endif /* __LINUX_MTD_SPINAND_H */
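
Manufacturer drivers opt in by providing a set_cont_read() hook through SPINAND_CONT_READ() in their info table. A sketch of such a hook; the configuration bit is made up for illustration:

	#define EXAMPLE_CFG_CONT_READ_BIT	BIT(4)	/* hypothetical vendor bit */

	static int example_set_cont_read(struct spinand_device *spinand, bool enable)
	{
		return spinand_upd_cfg(spinand, EXAMPLE_CFG_CONT_READ_BIT,
				       enable ? EXAMPLE_CFG_CONT_READ_BIT : 0);
	}
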
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index a561c629d89f..2bf91b57591b 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -49,7 +49,6 @@ static inline void mutex_destroy(struct mutex *lock) {}
#endif
-#ifndef CONFIG_PREEMPT_RT
/**
* mutex_init - initialize the mutex
* @mutex: the mutex to be initialized
@@ -65,6 +64,18 @@ do { \
__mutex_init((mutex), #mutex, &__key); \
} while (0)
+/**
+ * mutex_init_with_key - initialize a mutex with a given lockdep key
+ * @mutex: the mutex to be initialized
+ * @key: the lockdep key to be associated with the mutex
+ *
+ * Initialize the mutex to the unlocked state.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
+#define mutex_init_with_key(mutex, key) __mutex_init((mutex), #mutex, (key))
+
+#ifndef CONFIG_PREEMPT_RT
#define __MUTEX_INITIALIZER(lockname) \
{ .owner = ATOMIC_LONG_INIT(0) \
, .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
@@ -111,12 +122,6 @@ do { \
__mutex_rt_init((mutex), name, key); \
} while (0)
-#define mutex_init(mutex) \
-do { \
- static struct lock_class_key __key; \
- \
- __mutex_init((mutex), #mutex, &__key); \
-} while (0)
#endif /* CONFIG_PREEMPT_RT */
#ifdef CONFIG_DEBUG_MUTEXES
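
mutex_init_with_key() is now shared by the normal and PREEMPT_RT builds. Its usual purpose is letting dynamically allocated objects share one lockdep class; a sketch with a hypothetical struct foo that embeds a mutex named lock:

	static struct lock_class_key foo_lock_key;	/* one class for every foo */

	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;
	mutex_init_with_key(&foo->lock, &foo_lock_key);
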
diff --git a/include/linux/net.h b/include/linux/net.h
index 688320b79fcc..b75bc534c1b3 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -322,6 +322,25 @@ static inline bool sendpage_ok(struct page *page)
return !PageSlab(page) && page_count(page) >= 1;
}
+/*
+ * Check sendpage_ok on contiguous pages.
+ */
+static inline bool sendpages_ok(struct page *page, size_t len, size_t offset)
+{
+ struct page *p = page + (offset >> PAGE_SHIFT);
+ size_t count = 0;
+
+ while (count < len) {
+ if (!sendpage_ok(p))
+ return false;
+
+ p++;
+ count += PAGE_SIZE;
+ }
+
+ return true;
+}
+
int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
size_t num, size_t len);
int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg,
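
sendpages_ok() extends the per-page check to a contiguous range, which is what zero-copy senders want to know before using MSG_SPLICE_PAGES. An illustrative guard, not taken from a specific caller:

	if (!sendpages_ok(page, len, offset))
		msg.msg_flags &= ~MSG_SPLICE_PAGES;	/* fall back to copying */
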
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e87b5e488325..8896705ccd63 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3325,6 +3325,12 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
+ /* Paired with READ_ONCE() from dev_watchdog() */
+ WRITE_ONCE(dev_queue->trans_start, jiffies);
+
+ /* This barrier is paired with smp_mb() from dev_watchdog() */
+ smp_mb__before_atomic();
+
/* Must be an atomic op see netif_txq_try_stop() */
set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
@@ -3451,6 +3457,12 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
if (likely(dql_avail(&dev_queue->dql) >= 0))
return;
+ /* Paired with READ_ONCE() from dev_watchdog() */
+ WRITE_ONCE(dev_queue->trans_start, jiffies);
+
+ /* This barrier is paired with smp_mb() from dev_watchdog() */
+ smp_mb__before_atomic();
+
set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
/*
@@ -5029,6 +5041,24 @@ void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs);
void netif_inherit_tso_max(struct net_device *to,
const struct net_device *from);
+static inline unsigned int
+netif_get_gro_max_size(const struct net_device *dev, const struct sk_buff *skb)
+{
+ /* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
+ return skb->protocol == htons(ETH_P_IPV6) ?
+ READ_ONCE(dev->gro_max_size) :
+ READ_ONCE(dev->gro_ipv4_max_size);
+}
+
+static inline unsigned int
+netif_get_gso_max_size(const struct net_device *dev, const struct sk_buff *skb)
+{
+ /* pairs with WRITE_ONCE() in netif_set_gso(_ipv4)_max_size() */
+ return skb->protocol == htons(ETH_P_IPV6) ?
+ READ_ONCE(dev->gso_max_size) :
+ READ_ONCE(dev->gso_ipv4_max_size);
+}
+
static inline bool netif_is_macsec(const struct net_device *dev)
{
return dev->priv_flags & IFF_MACSEC;
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 2683b2b77612..2b8aac2c70ad 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -376,15 +376,11 @@ int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
struct nf_conn;
enum nf_nat_manip_type;
struct nlattr;
-enum ip_conntrack_dir;
struct nf_nat_hook {
int (*parse_nat_setup)(struct nf_conn *ct, enum nf_nat_manip_type manip,
const struct nlattr *attr);
void (*decode_session)(struct sk_buff *skb, struct flowi *fl);
- unsigned int (*manip_pkt)(struct sk_buff *skb, struct nf_conn *ct,
- enum nf_nat_manip_type mtype,
- enum ip_conntrack_dir dir);
void (*remove_nat_bysrc)(struct nf_conn *ct);
};
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index c47443e7a97e..5eaceef41e6c 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -38,11 +38,8 @@ static inline void folio_start_private_2(struct folio *folio)
folio_set_private_2(folio);
}
-/* Marks used on xarray-based buffers */
-#define NETFS_BUF_PUT_MARK XA_MARK_0 /* - Page needs putting */
-#define NETFS_BUF_PAGECACHE_MARK XA_MARK_1 /* - Page needs wb/dirty flag wrangling */
-
enum netfs_io_source {
+ NETFS_SOURCE_UNKNOWN,
NETFS_FILL_WITH_ZEROES,
NETFS_DOWNLOAD_FROM_SERVER,
NETFS_READ_FROM_CACHE,
@@ -73,6 +70,7 @@ struct netfs_inode {
#define NETFS_ICTX_ODIRECT 0 /* The file has DIO in progress */
#define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */
#define NETFS_ICTX_WRITETHROUGH 2 /* Write-through caching */
+#define NETFS_ICTX_MODIFIED_ATTR 3 /* Indicate change in mtime/ctime */
};
/*
@@ -133,9 +131,11 @@ static inline struct netfs_group *netfs_folio_group(struct folio *folio)
struct netfs_io_stream {
/* Submission tracking */
struct netfs_io_subrequest *construct; /* Op being constructed */
+ size_t sreq_max_len; /* Maximum size of a subrequest */
+ unsigned int sreq_max_segs; /* 0 or max number of segments in an iterator */
unsigned int submit_off; /* Folio offset we're submitting from */
unsigned int submit_len; /* Amount of data left to submit */
- unsigned int submit_max_len; /* Amount I/O can be rounded up to */
+ unsigned int submit_extendable_to; /* Amount I/O can be rounded up to */
void (*prepare_write)(struct netfs_io_subrequest *subreq);
void (*issue_write)(struct netfs_io_subrequest *subreq);
/* Collection tracking */
@@ -176,41 +176,45 @@ struct netfs_io_subrequest {
struct list_head rreq_link; /* Link in rreq->subrequests */
struct iov_iter io_iter; /* Iterator for this subrequest */
unsigned long long start; /* Where to start the I/O */
- size_t max_len; /* Maximum size of the I/O */
size_t len; /* Size of the I/O */
size_t transferred; /* Amount of data transferred */
+ size_t consumed; /* Amount of read data consumed */
+ size_t prev_donated; /* Amount of data donated from previous subreq */
+ size_t next_donated; /* Amount of data donated from next subreq */
refcount_t ref;
short error; /* 0 or error that occurred */
unsigned short debug_index; /* Index in list (for debugging output) */
unsigned int nr_segs; /* Number of segs in io_iter */
- unsigned int max_nr_segs; /* 0 or max number of segments in an iterator */
enum netfs_io_source source; /* Where to read from/write to */
unsigned char stream_nr; /* I/O stream this belongs to */
+ unsigned char curr_folioq_slot; /* Folio currently being read */
+ unsigned char curr_folio_order; /* Order of folio */
+ struct folio_queue *curr_folioq; /* Queue segment in which current folio resides */
unsigned long flags;
#define NETFS_SREQ_COPY_TO_CACHE 0 /* Set if should copy the data to the cache */
#define NETFS_SREQ_CLEAR_TAIL 1 /* Set if the rest of the read should be cleared */
-#define NETFS_SREQ_SHORT_IO 2 /* Set if the I/O was short */
#define NETFS_SREQ_SEEK_DATA_READ 3 /* Set if ->read() should SEEK_DATA first */
#define NETFS_SREQ_NO_PROGRESS 4 /* Set if we didn't manage to read any data */
#define NETFS_SREQ_ONDEMAND 5 /* Set if it's from on-demand read mode */
#define NETFS_SREQ_BOUNDARY 6 /* Set if ends on hard boundary (eg. ceph object) */
+#define NETFS_SREQ_HIT_EOF 7 /* Set if short due to EOF */
#define NETFS_SREQ_IN_PROGRESS 8 /* Unlocked when the subrequest completes */
#define NETFS_SREQ_NEED_RETRY 9 /* Set if the filesystem requests a retry */
#define NETFS_SREQ_RETRYING 10 /* Set if we're retrying */
#define NETFS_SREQ_FAILED 11 /* Set if the subreq failed unretryably */
-#define NETFS_SREQ_HIT_EOF 12 /* Set if we hit the EOF */
};
enum netfs_io_origin {
NETFS_READAHEAD, /* This read was triggered by readahead */
NETFS_READPAGE, /* This read is a synchronous read */
+ NETFS_READ_GAPS, /* This read is a synchronous read to fill gaps */
NETFS_READ_FOR_WRITE, /* This read is to prepare a write */
- NETFS_COPY_TO_CACHE, /* This write is to copy a read to the cache */
+ NETFS_DIO_READ, /* This is a direct I/O read */
NETFS_WRITEBACK, /* This write was triggered by writepages */
NETFS_WRITETHROUGH, /* This write was made by netfs_perform_write() */
NETFS_UNBUFFERED_WRITE, /* This is an unbuffered write */
- NETFS_DIO_READ, /* This is a direct I/O read */
NETFS_DIO_WRITE, /* This is a direct I/O write */
+ NETFS_PGPRIV2_COPY_TO_CACHE, /* [DEPRECATED] This is writing read data to the cache */
nr__netfs_io_origin
} __mode(byte);
@@ -227,11 +231,14 @@ struct netfs_io_request {
struct address_space *mapping; /* The mapping being accessed */
struct kiocb *iocb; /* AIO completion vector */
struct netfs_cache_resources cache_resources;
+ struct readahead_control *ractl; /* Readahead descriptor */
struct list_head proc_link; /* Link in netfs_iorequests */
struct list_head subrequests; /* Contributory I/O operations */
struct netfs_io_stream io_streams[2]; /* Streams of parallel I/O operations */
#define NR_IO_STREAMS 2 //wreq->nr_io_streams
struct netfs_group *group; /* Writeback group being written back */
+ struct folio_queue *buffer; /* Head of I/O buffer */
+ struct folio_queue *buffer_tail; /* Tail of I/O buffer */
struct iov_iter iter; /* Unencrypted-side iterator */
struct iov_iter io_iter; /* I/O (Encrypted-side) iterator */
void *netfs_priv; /* Private data for the netfs */
@@ -245,24 +252,23 @@ struct netfs_io_request {
unsigned int nr_group_rel; /* Number of refs to release on ->group */
spinlock_t lock; /* Lock for queuing subreqs */
atomic_t nr_outstanding; /* Number of ops in progress */
- atomic_t nr_copy_ops; /* Number of copy-to-cache ops in progress */
- size_t upper_len; /* Length can be extended to here */
unsigned long long submitted; /* Amount submitted for I/O so far */
unsigned long long len; /* Length of the request */
size_t transferred; /* Amount to be indicated as transferred */
- short error; /* 0 or error that occurred */
+ long error; /* 0 or error that occurred */
enum netfs_io_origin origin; /* Origin of the request */
bool direct_bv_unpin; /* T if direct_bv[] must be unpinned */
+ u8 buffer_head_slot; /* First slot in ->buffer */
+ u8 buffer_tail_slot; /* Next slot in ->buffer_tail */
unsigned long long i_size; /* Size of the file */
unsigned long long start; /* Start position */
atomic64_t issued_to; /* Write issuer folio cursor */
- unsigned long long contiguity; /* Tracking for gaps in the writeback sequence */
unsigned long long collected_to; /* Point we've collected to */
unsigned long long cleaned_to; /* Position we've cleaned folios to */
pgoff_t no_unlock_folio; /* Don't unlock this folio after read */
+ size_t prev_donated; /* Fallback for subreq->prev_donated */
refcount_t ref;
unsigned long flags;
-#define NETFS_RREQ_INCOMPLETE_IO 0 /* Some ioreqs terminated short or with error */
#define NETFS_RREQ_COPY_TO_CACHE 1 /* Need to write to the cache */
#define NETFS_RREQ_NO_UNLOCK_FOLIO 2 /* Don't unlock no_unlock_folio on completion */
#define NETFS_RREQ_DONT_UNLOCK_FOLIOS 3 /* Don't unlock the folios on completion */
@@ -274,6 +280,7 @@ struct netfs_io_request {
#define NETFS_RREQ_PAUSE 11 /* Pause subrequest generation */
#define NETFS_RREQ_USE_IO_ITER 12 /* Use ->io_iter rather than ->i_pages */
#define NETFS_RREQ_ALL_QUEUED 13 /* All subreqs are now queued */
+#define NETFS_RREQ_NEED_RETRY 14 /* Need to try retrying */
#define NETFS_RREQ_USE_PGPRIV2 31 /* [DEPRECATED] Use PG_private_2 to mark
* write to cache on read */
const struct netfs_request_ops *netfs_ops;
@@ -292,7 +299,7 @@ struct netfs_request_ops {
/* Read request handling */
void (*expand_readahead)(struct netfs_io_request *rreq);
- bool (*clamp_length)(struct netfs_io_subrequest *subreq);
+ int (*prepare_read)(struct netfs_io_subrequest *subreq);
void (*issue_read)(struct netfs_io_subrequest *subreq);
bool (*is_still_valid)(struct netfs_io_request *rreq);
int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
@@ -422,7 +429,10 @@ bool netfs_release_folio(struct folio *folio, gfp_t gfp);
vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group);
/* (Sub)request management API. */
-void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool);
+void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq,
+ bool was_async);
+void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq,
+ int error, bool was_async);
void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
enum netfs_sreq_ref_trace what);
void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
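
A hedged sketch of how a filesystem completion path might report through the renamed read-side API; myfs_read_done(), its arguments, and the update of subreq->transferred are assumptions about typical usage, not taken from this patch:

static void myfs_read_done(struct netfs_io_subrequest *subreq,
			   ssize_t bytes, bool was_async)
{
	/* Record what was read, then hand the subrequest back to netfslib. */
	if (bytes > 0)
		subreq->transferred += bytes;
	netfs_read_subreq_terminated(subreq, bytes < 0 ? bytes : 0, was_async);
}
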
diff --git a/include/linux/nfs.h b/include/linux/nfs.h
index ceb70a926b95..9ad727ddfedb 100644
--- a/include/linux/nfs.h
+++ b/include/linux/nfs.h
@@ -8,11 +8,20 @@
#ifndef _LINUX_NFS_H
#define _LINUX_NFS_H
+#include <linux/cred.h>
+#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/string.h>
#include <linux/crc32.h>
#include <uapi/linux/nfs.h>
+/* The LOCALIO program is entirely private to Linux and is
+ * NOT part of the uapi.
+ */
+#define NFS_LOCALIO_PROGRAM 400122
+#define LOCALIOPROC_NULL 0
+#define LOCALIOPROC_UUID_IS_LOCAL 1
+
/*
* This is the kernel NFS client file handle representation
*/
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index f9df88091c6d..8d7430d9f218 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -281,15 +281,18 @@ enum nfsstat4 {
/* nfs42 */
NFS4ERR_PARTNER_NOTSUPP = 10088,
NFS4ERR_PARTNER_NO_AUTH = 10089,
- NFS4ERR_UNION_NOTSUPP = 10090,
- NFS4ERR_OFFLOAD_DENIED = 10091,
- NFS4ERR_WRONG_LFS = 10092,
- NFS4ERR_BADLABEL = 10093,
- NFS4ERR_OFFLOAD_NO_REQS = 10094,
+ NFS4ERR_UNION_NOTSUPP = 10090,
+ NFS4ERR_OFFLOAD_DENIED = 10091,
+ NFS4ERR_WRONG_LFS = 10092,
+ NFS4ERR_BADLABEL = 10093,
+ NFS4ERR_OFFLOAD_NO_REQS = 10094,
/* xattr (RFC8276) */
- NFS4ERR_NOXATTR = 10095,
- NFS4ERR_XATTR2BIG = 10096,
+ NFS4ERR_NOXATTR = 10095,
+ NFS4ERR_XATTR2BIG = 10096,
+
+ /* can be used for internal errors */
+ NFS4ERR_FIRST_FREE
};
/* error codes for internal client use */
diff --git a/include/linux/nfs_common.h b/include/linux/nfs_common.h
new file mode 100644
index 000000000000..5fc02df88252
--- /dev/null
+++ b/include/linux/nfs_common.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file contains constants and methods used by both NFS client and server.
+ */
+#ifndef _LINUX_NFS_COMMON_H
+#define _LINUX_NFS_COMMON_H
+
+#include <linux/errno.h>
+#include <uapi/linux/nfs.h>
+
+/* Mapping from NFS error code to "errno" error code. */
+#define errno_NFSERR_IO EIO
+
+int nfs_stat_to_errno(enum nfs_stat status);
+int nfs4_stat_to_errno(int stat);
+
+#endif /* _LINUX_NFS_COMMON_H */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 1df86ab98c77..b804346a9741 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -8,6 +8,7 @@
#include <linux/wait.h>
#include <linux/nfs_xdr.h>
#include <linux/sunrpc/xprt.h>
+#include <linux/nfslocalio.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
@@ -49,6 +50,7 @@ struct nfs_client {
#define NFS_CS_DS 7 /* - Server is a DS */
#define NFS_CS_REUSEPORT 8 /* - reuse src port on reconnect */
#define NFS_CS_PNFS 9 /* - Server used for pnfs */
+#define NFS_CS_LOCAL_IO 10 /* - client is local */
struct sockaddr_storage cl_addr; /* server identifier */
size_t cl_addrlen;
char * cl_hostname; /* hostname of server */
@@ -125,6 +127,13 @@ struct nfs_client {
struct net *cl_net;
struct list_head pending_cb_stateids;
struct rcu_head rcu;
+
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ struct timespec64 cl_nfssvc_boot;
+ seqlock_t cl_boot_lock;
+ nfs_uuid_t cl_uuid;
+ spinlock_t cl_localio_lock;
+#endif /* CONFIG_NFS_LOCALIO */
};
/*
@@ -158,6 +167,7 @@ struct nfs_server {
#define NFS_MOUNT_WRITE_WAIT 0x02000000
#define NFS_MOUNT_TRUNK_DISCOVERY 0x04000000
#define NFS_MOUNT_SHUTDOWN 0x08000000
+#define NFS_MOUNT_NO_ALIGNWRITE 0x10000000
unsigned int fattr_valid; /* Valid attributes */
unsigned int caps; /* server capabilities */
@@ -234,12 +244,12 @@ struct nfs_server {
/* the following fields are protected by nfs_client->cl_lock */
struct rb_root state_owners;
#endif
- struct ida openowner_id;
- struct ida lockowner_id;
+ atomic64_t owner_ctr;
struct list_head state_owners_lru;
struct list_head layouts;
struct list_head delegations;
struct list_head ss_copies;
+ struct list_head ss_src_copies;
unsigned long delegation_gen;
unsigned long mig_gen;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 45623af3e7b8..12d8e47bc5a3 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -446,7 +446,7 @@ struct nfs42_clone_res {
struct stateowner_id {
__u64 create_time;
- __u32 uniquifier;
+ __u64 uniquifier;
};
struct nfs4_open_delegation {
@@ -1854,6 +1854,24 @@ struct nfs_rpc_ops {
};
/*
+ * Helper functions used by NFS client and/or server
+ */
+static inline void encode_opaque_fixed(struct xdr_stream *xdr,
+ const void *buf, size_t len)
+{
+ WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
+}
+
+static inline int decode_opaque_fixed(struct xdr_stream *xdr,
+ void *buf, size_t len)
+{
+ ssize_t ret = xdr_stream_decode_opaque_fixed(xdr, buf, len);
+ if (unlikely(ret < 0))
+ return -EIO;
+ return 0;
+}
+
+/*
* Function vectors etc. for the NFS client
*/
extern const struct nfs_rpc_ops nfs_v2_clientops;
@@ -1866,4 +1884,4 @@ extern const struct rpc_version nfs_version4;
extern const struct rpc_version nfsacl_version3;
extern const struct rpc_program nfsacl_program;
-#endif
+#endif /* _LINUX_NFS_XDR_H */
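
A hedged sketch of typical use of these shared XDR helpers to move a fixed-size opaque blob (here an 8-byte verifier) on and off the stream; the wrapper names are illustrative:

static void encode_verifier(struct xdr_stream *xdr, const u8 verifier[8])
{
	encode_opaque_fixed(xdr, verifier, 8);
}

static int decode_verifier(struct xdr_stream *xdr, u8 verifier[8])
{
	return decode_opaque_fixed(xdr, verifier, 8);	/* 0 or -EIO */
}
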
diff --git a/include/linux/nfslocalio.h b/include/linux/nfslocalio.h
new file mode 100644
index 000000000000..b0dd9b1eef4f
--- /dev/null
+++ b/include/linux/nfslocalio.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com>
+ * Copyright (C) 2024 NeilBrown <neilb@suse.de>
+ */
+#ifndef __LINUX_NFSLOCALIO_H
+#define __LINUX_NFSLOCALIO_H
+
+/* nfsd_file structure is purposely kept opaque to NFS client */
+struct nfsd_file;
+
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/uuid.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svcauth.h>
+#include <linux/nfs.h>
+#include <net/net_namespace.h>
+
+/*
+ * Useful to allow a client to negotiate whether localio is
+ * possible with its server.
+ *
+ * See Documentation/filesystems/nfs/localio.rst for more detail.
+ */
+typedef struct {
+ uuid_t uuid;
+ struct list_head list;
+ struct net __rcu *net; /* nfsd's network namespace */
+ struct auth_domain *dom; /* auth_domain for localio */
+} nfs_uuid_t;
+
+void nfs_uuid_begin(nfs_uuid_t *);
+void nfs_uuid_end(nfs_uuid_t *);
+void nfs_uuid_is_local(const uuid_t *, struct list_head *,
+ struct net *, struct auth_domain *, struct module *);
+void nfs_uuid_invalidate_clients(struct list_head *list);
+void nfs_uuid_invalidate_one_client(nfs_uuid_t *nfs_uuid);
+
+/* localio needs to map filehandle -> struct nfsd_file */
+extern struct nfsd_file *
+nfsd_open_local_fh(struct net *, struct auth_domain *, struct rpc_clnt *,
+ const struct cred *, const struct nfs_fh *,
+ const fmode_t) __must_hold(rcu);
+
+struct nfsd_localio_operations {
+ bool (*nfsd_serv_try_get)(struct net *);
+ void (*nfsd_serv_put)(struct net *);
+ struct nfsd_file *(*nfsd_open_local_fh)(struct net *,
+ struct auth_domain *,
+ struct rpc_clnt *,
+ const struct cred *,
+ const struct nfs_fh *,
+ const fmode_t);
+ void (*nfsd_file_put_local)(struct nfsd_file *);
+ struct file *(*nfsd_file_file)(struct nfsd_file *);
+} ____cacheline_aligned;
+
+extern void nfsd_localio_ops_init(void);
+extern const struct nfsd_localio_operations *nfs_to;
+
+struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *,
+ struct rpc_clnt *, const struct cred *,
+ const struct nfs_fh *, const fmode_t);
+
+static inline void nfs_to_nfsd_file_put_local(struct nfsd_file *localio)
+{
+ /*
+	 * Once the reference to nfsd_serv is dropped, NFSD could be
+	 * unloaded, so ensure a safe return from nfsd_file_put_local()
+	 * by always taking the RCU read lock.
+ */
+ rcu_read_lock();
+ nfs_to->nfsd_file_put_local(localio);
+ rcu_read_unlock();
+}
+
+#else /* CONFIG_NFS_LOCALIO */
+static inline void nfsd_localio_ops_init(void)
+{
+}
+static inline void nfs_to_nfsd_file_put_local(struct nfsd_file *localio)
+{
+}
+#endif /* CONFIG_NFS_LOCALIO */
+
+#endif /* __LINUX_NFSLOCALIO_H */
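
A hedged sketch of the intended client-side calling pattern; myfs_probe_localio() is hypothetical, and whether nfs_open_local_fh() reports failure with NULL or an ERR_PTR is assumed here:

static bool myfs_probe_localio(struct nfs_client *clp,
			       const struct cred *cred,
			       const struct nfs_fh *fh)
{
	struct nfsd_file *localio;

	localio = nfs_open_local_fh(&clp->cl_uuid, clp->cl_rpcclient,
				    cred, fh, FMODE_READ);
	if (IS_ERR_OR_NULL(localio))
		return false;

	/* nfs_to->nfsd_file_file(localio) would yield the struct file for
	 * direct local I/O; here we only probe and drop the reference.
	 */
	nfs_to_nfsd_file_put_local(localio);
	return true;
}
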
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index b61438313a73..9fd7a0ce9c1a 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -107,11 +107,11 @@ extern nodemask_t _unused_nodemask_arg_;
*/
#define nodemask_pr_args(maskp) __nodemask_pr_numnodes(maskp), \
__nodemask_pr_bits(maskp)
-static inline unsigned int __nodemask_pr_numnodes(const nodemask_t *m)
+static __always_inline unsigned int __nodemask_pr_numnodes(const nodemask_t *m)
{
return m ? MAX_NUMNODES : 0;
}
-static inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m)
+static __always_inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m)
{
return m ? m->bits : NULL;
}
@@ -132,19 +132,19 @@ static __always_inline void __node_set(int node, volatile nodemask_t *dstp)
}
#define node_clear(node, dst) __node_clear((node), &(dst))
-static inline void __node_clear(int node, volatile nodemask_t *dstp)
+static __always_inline void __node_clear(int node, volatile nodemask_t *dstp)
{
clear_bit(node, dstp->bits);
}
#define nodes_setall(dst) __nodes_setall(&(dst), MAX_NUMNODES)
-static inline void __nodes_setall(nodemask_t *dstp, unsigned int nbits)
+static __always_inline void __nodes_setall(nodemask_t *dstp, unsigned int nbits)
{
bitmap_fill(dstp->bits, nbits);
}
#define nodes_clear(dst) __nodes_clear(&(dst), MAX_NUMNODES)
-static inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
+static __always_inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
{
bitmap_zero(dstp->bits, nbits);
}
@@ -154,14 +154,14 @@ static inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
#define node_test_and_set(node, nodemask) \
__node_test_and_set((node), &(nodemask))
-static inline bool __node_test_and_set(int node, nodemask_t *addr)
+static __always_inline bool __node_test_and_set(int node, nodemask_t *addr)
{
return test_and_set_bit(node, addr->bits);
}
#define nodes_and(dst, src1, src2) \
__nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
+static __always_inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
@@ -169,7 +169,7 @@ static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
#define nodes_or(dst, src1, src2) \
__nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
+static __always_inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
@@ -177,7 +177,7 @@ static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
#define nodes_xor(dst, src1, src2) \
__nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
+static __always_inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
@@ -185,7 +185,7 @@ static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
#define nodes_andnot(dst, src1, src2) \
__nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
+static __always_inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
@@ -193,7 +193,7 @@ static inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
#define nodes_complement(dst, src) \
__nodes_complement(&(dst), &(src), MAX_NUMNODES)
-static inline void __nodes_complement(nodemask_t *dstp,
+static __always_inline void __nodes_complement(nodemask_t *dstp,
const nodemask_t *srcp, unsigned int nbits)
{
bitmap_complement(dstp->bits, srcp->bits, nbits);
@@ -201,7 +201,7 @@ static inline void __nodes_complement(nodemask_t *dstp,
#define nodes_equal(src1, src2) \
__nodes_equal(&(src1), &(src2), MAX_NUMNODES)
-static inline bool __nodes_equal(const nodemask_t *src1p,
+static __always_inline bool __nodes_equal(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_equal(src1p->bits, src2p->bits, nbits);
@@ -209,7 +209,7 @@ static inline bool __nodes_equal(const nodemask_t *src1p,
#define nodes_intersects(src1, src2) \
__nodes_intersects(&(src1), &(src2), MAX_NUMNODES)
-static inline bool __nodes_intersects(const nodemask_t *src1p,
+static __always_inline bool __nodes_intersects(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_intersects(src1p->bits, src2p->bits, nbits);
@@ -217,33 +217,33 @@ static inline bool __nodes_intersects(const nodemask_t *src1p,
#define nodes_subset(src1, src2) \
__nodes_subset(&(src1), &(src2), MAX_NUMNODES)
-static inline bool __nodes_subset(const nodemask_t *src1p,
+static __always_inline bool __nodes_subset(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_subset(src1p->bits, src2p->bits, nbits);
}
#define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES)
-static inline bool __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
+static __always_inline bool __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_empty(srcp->bits, nbits);
}
#define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES)
-static inline bool __nodes_full(const nodemask_t *srcp, unsigned int nbits)
+static __always_inline bool __nodes_full(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_full(srcp->bits, nbits);
}
#define nodes_weight(nodemask) __nodes_weight(&(nodemask), MAX_NUMNODES)
-static inline int __nodes_weight(const nodemask_t *srcp, unsigned int nbits)
+static __always_inline int __nodes_weight(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_weight(srcp->bits, nbits);
}
#define nodes_shift_right(dst, src, n) \
__nodes_shift_right(&(dst), &(src), (n), MAX_NUMNODES)
-static inline void __nodes_shift_right(nodemask_t *dstp,
+static __always_inline void __nodes_shift_right(nodemask_t *dstp,
const nodemask_t *srcp, int n, int nbits)
{
bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
@@ -251,7 +251,7 @@ static inline void __nodes_shift_right(nodemask_t *dstp,
#define nodes_shift_left(dst, src, n) \
__nodes_shift_left(&(dst), &(src), (n), MAX_NUMNODES)
-static inline void __nodes_shift_left(nodemask_t *dstp,
+static __always_inline void __nodes_shift_left(nodemask_t *dstp,
const nodemask_t *srcp, int n, int nbits)
{
bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
@@ -261,13 +261,13 @@ static inline void __nodes_shift_left(nodemask_t *dstp,
> MAX_NUMNODES, then the silly min_ts could be dropped. */
#define first_node(src) __first_node(&(src))
-static inline unsigned int __first_node(const nodemask_t *srcp)
+static __always_inline unsigned int __first_node(const nodemask_t *srcp)
{
return min_t(unsigned int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
}
#define next_node(n, src) __next_node((n), &(src))
-static inline unsigned int __next_node(int n, const nodemask_t *srcp)
+static __always_inline unsigned int __next_node(int n, const nodemask_t *srcp)
{
return min_t(unsigned int, MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
}
@@ -277,7 +277,7 @@ static inline unsigned int __next_node(int n, const nodemask_t *srcp)
* the first node in src if needed. Returns MAX_NUMNODES if src is empty.
*/
#define next_node_in(n, src) __next_node_in((n), &(src))
-static inline unsigned int __next_node_in(int node, const nodemask_t *srcp)
+static __always_inline unsigned int __next_node_in(int node, const nodemask_t *srcp)
{
unsigned int ret = __next_node(node, srcp);
@@ -286,7 +286,7 @@ static inline unsigned int __next_node_in(int node, const nodemask_t *srcp)
return ret;
}
-static inline void init_nodemask_of_node(nodemask_t *mask, int node)
+static __always_inline void init_nodemask_of_node(nodemask_t *mask, int node)
{
nodes_clear(*mask);
node_set(node, *mask);
@@ -304,7 +304,7 @@ static inline void init_nodemask_of_node(nodemask_t *mask, int node)
})
#define first_unset_node(mask) __first_unset_node(&(mask))
-static inline unsigned int __first_unset_node(const nodemask_t *maskp)
+static __always_inline unsigned int __first_unset_node(const nodemask_t *maskp)
{
return min_t(unsigned int, MAX_NUMNODES,
find_first_zero_bit(maskp->bits, MAX_NUMNODES));
@@ -338,21 +338,21 @@ static inline unsigned int __first_unset_node(const nodemask_t *maskp)
#define nodemask_parse_user(ubuf, ulen, dst) \
__nodemask_parse_user((ubuf), (ulen), &(dst), MAX_NUMNODES)
-static inline int __nodemask_parse_user(const char __user *buf, int len,
+static __always_inline int __nodemask_parse_user(const char __user *buf, int len,
nodemask_t *dstp, int nbits)
{
return bitmap_parse_user(buf, len, dstp->bits, nbits);
}
#define nodelist_parse(buf, dst) __nodelist_parse((buf), &(dst), MAX_NUMNODES)
-static inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
+static __always_inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
{
return bitmap_parselist(buf, dstp->bits, nbits);
}
#define node_remap(oldbit, old, new) \
__node_remap((oldbit), &(old), &(new), MAX_NUMNODES)
-static inline int __node_remap(int oldbit,
+static __always_inline int __node_remap(int oldbit,
const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
@@ -360,7 +360,7 @@ static inline int __node_remap(int oldbit,
#define nodes_remap(dst, src, old, new) \
__nodes_remap(&(dst), &(src), &(old), &(new), MAX_NUMNODES)
-static inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
+static __always_inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
@@ -368,7 +368,7 @@ static inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
#define nodes_onto(dst, orig, relmap) \
__nodes_onto(&(dst), &(orig), &(relmap), MAX_NUMNODES)
-static inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
+static __always_inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
const nodemask_t *relmapp, int nbits)
{
bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
@@ -376,7 +376,7 @@ static inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
#define nodes_fold(dst, orig, sz) \
__nodes_fold(&(dst), &(orig), sz, MAX_NUMNODES)
-static inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
+static __always_inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
int sz, int nbits)
{
bitmap_fold(dstp->bits, origp->bits, sz, nbits);
@@ -418,22 +418,22 @@ enum node_states {
extern nodemask_t node_states[NR_NODE_STATES];
#if MAX_NUMNODES > 1
-static inline int node_state(int node, enum node_states state)
+static __always_inline int node_state(int node, enum node_states state)
{
return node_isset(node, node_states[state]);
}
-static inline void node_set_state(int node, enum node_states state)
+static __always_inline void node_set_state(int node, enum node_states state)
{
__node_set(node, &node_states[state]);
}
-static inline void node_clear_state(int node, enum node_states state)
+static __always_inline void node_clear_state(int node, enum node_states state)
{
__node_clear(node, &node_states[state]);
}
-static inline int num_node_state(enum node_states state)
+static __always_inline int num_node_state(enum node_states state)
{
return nodes_weight(node_states[state]);
}
@@ -443,11 +443,11 @@ static inline int num_node_state(enum node_states state)
#define first_online_node first_node(node_states[N_ONLINE])
#define first_memory_node first_node(node_states[N_MEMORY])
-static inline unsigned int next_online_node(int nid)
+static __always_inline unsigned int next_online_node(int nid)
{
return next_node(nid, node_states[N_ONLINE]);
}
-static inline unsigned int next_memory_node(int nid)
+static __always_inline unsigned int next_memory_node(int nid)
{
return next_node(nid, node_states[N_MEMORY]);
}
@@ -455,13 +455,13 @@ static inline unsigned int next_memory_node(int nid)
extern unsigned int nr_node_ids;
extern unsigned int nr_online_nodes;
-static inline void node_set_online(int nid)
+static __always_inline void node_set_online(int nid)
{
node_set_state(nid, N_ONLINE);
nr_online_nodes = num_node_state(N_ONLINE);
}
-static inline void node_set_offline(int nid)
+static __always_inline void node_set_offline(int nid)
{
node_clear_state(nid, N_ONLINE);
nr_online_nodes = num_node_state(N_ONLINE);
@@ -469,20 +469,20 @@ static inline void node_set_offline(int nid)
#else
-static inline int node_state(int node, enum node_states state)
+static __always_inline int node_state(int node, enum node_states state)
{
return node == 0;
}
-static inline void node_set_state(int node, enum node_states state)
+static __always_inline void node_set_state(int node, enum node_states state)
{
}
-static inline void node_clear_state(int node, enum node_states state)
+static __always_inline void node_clear_state(int node, enum node_states state)
{
}
-static inline int num_node_state(enum node_states state)
+static __always_inline int num_node_state(enum node_states state)
{
return 1;
}
@@ -502,7 +502,7 @@ static inline int num_node_state(enum node_states state)
#endif
-static inline int node_random(const nodemask_t *maskp)
+static __always_inline int node_random(const nodemask_t *maskp)
{
#if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1)
int w, bit;
diff --git a/include/linux/numa.h b/include/linux/numa.h
index eb19503604fe..3567e40329eb 100644
--- a/include/linux/numa.h
+++ b/include/linux/numa.h
@@ -30,6 +30,12 @@ static inline bool numa_valid_node(int nid)
#ifdef CONFIG_NUMA
#include <asm/sparsemem.h>
+extern struct pglist_data *node_data[];
+#define NODE_DATA(nid) (node_data[nid])
+
+void __init alloc_node_data(int nid);
+void __init alloc_offline_node_data(int nid);
+
/* Generic implementation available */
int numa_nearest_node(int node, unsigned int state);
@@ -57,6 +63,8 @@ static inline int phys_to_target_node(u64 start)
{
return 0;
}
+
+static inline void alloc_offline_node_data(int nid) {}
#endif
#define numa_map_to_online_node(node) numa_nearest_node(node, N_ONLINE)
diff --git a/include/linux/numa_memblks.h b/include/linux/numa_memblks.h
new file mode 100644
index 000000000000..cfad6ce7e1bd
--- /dev/null
+++ b/include/linux/numa_memblks.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NUMA_MEMBLKS_H
+#define __NUMA_MEMBLKS_H
+
+#ifdef CONFIG_NUMA_MEMBLKS
+#include <linux/types.h>
+
+#define NR_NODE_MEMBLKS (MAX_NUMNODES * 2)
+
+void __init numa_set_distance(int from, int to, int distance);
+void __init numa_reset_distance(void);
+
+struct numa_memblk {
+ u64 start;
+ u64 end;
+ int nid;
+};
+
+struct numa_meminfo {
+ int nr_blks;
+ struct numa_memblk blk[NR_NODE_MEMBLKS];
+};
+
+int __init numa_add_memblk(int nodeid, u64 start, u64 end);
+void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi);
+
+int __init numa_cleanup_meminfo(struct numa_meminfo *mi);
+
+int __init numa_memblks_init(int (*init_func)(void),
+ bool memblock_force_top_down);
+
+#ifdef CONFIG_NUMA_EMU
+int numa_emu_cmdline(char *str);
+void __init numa_emu_update_cpu_to_node(int *emu_nid_to_phys,
+ unsigned int nr_emu_nids);
+u64 __init numa_emu_dma_end(void);
+void __init numa_emulation(struct numa_meminfo *numa_meminfo,
+ int numa_dist_cnt);
+#else
+static inline void numa_emulation(struct numa_meminfo *numa_meminfo,
+ int numa_dist_cnt)
+{ }
+static inline int numa_emu_cmdline(char *str)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_NUMA_EMU */
+
+#ifdef CONFIG_NUMA_KEEP_MEMINFO
+extern int phys_to_target_node(u64 start);
+#define phys_to_target_node phys_to_target_node
+extern int memory_add_physaddr_to_nid(u64 start);
+#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
+#endif /* CONFIG_NUMA_KEEP_MEMINFO */
+
+#endif /* CONFIG_NUMA_MEMBLKS */
+
+#endif /* __NUMA_MEMBLKS_H */
diff --git a/include/linux/nvme-keyring.h b/include/linux/nvme-keyring.h
index e10333d78dbb..19d2b256180f 100644
--- a/include/linux/nvme-keyring.h
+++ b/include/linux/nvme-keyring.h
@@ -12,7 +12,7 @@ key_serial_t nvme_tls_psk_default(struct key *keyring,
const char *hostnqn, const char *subnqn);
key_serial_t nvme_keyring_id(void);
-
+struct key *nvme_tls_key_lookup(key_serial_t key_id);
#else
static inline key_serial_t nvme_tls_psk_default(struct key *keyring,
@@ -24,5 +24,9 @@ static inline key_serial_t nvme_keyring_id(void)
{
return 0;
}
+static inline struct key *nvme_tls_key_lookup(key_serial_t key_id)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
#endif /* !CONFIG_NVME_KEYRING */
#endif /* _NVME_KEYRING_H */
diff --git a/include/linux/nvme-rdma.h b/include/linux/nvme-rdma.h
index eb2f04d636c8..97c5f00b9aa3 100644
--- a/include/linux/nvme-rdma.h
+++ b/include/linux/nvme-rdma.h
@@ -25,6 +25,7 @@ enum nvme_rdma_cm_status {
NVME_RDMA_CM_NO_RSC = 0x06,
NVME_RDMA_CM_INVALID_IRD = 0x07,
NVME_RDMA_CM_INVALID_ORD = 0x08,
+ NVME_RDMA_CM_INVALID_CNTLID = 0x09,
};
static inline const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status)
@@ -46,6 +47,8 @@ static inline const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status)
return "invalid IRD";
case NVME_RDMA_CM_INVALID_ORD:
return "Invalid ORD";
+ case NVME_RDMA_CM_INVALID_CNTLID:
+ return "invalid controller ID";
default:
return "unrecognized reason";
}
@@ -64,7 +67,8 @@ struct nvme_rdma_cm_req {
__le16 qid;
__le16 hrqsize;
__le16 hsqsize;
- u8 rsvd[24];
+ __le16 cntlid;
+ u8 rsvd[22];
};
/**
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 7b2ae2e43544..b58d9405d65e 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -987,8 +987,8 @@ struct nvme_rw_command {
__le16 control;
__le32 dsmgmt;
__le32 reftag;
- __le16 apptag;
- __le16 appmask;
+ __le16 lbat;
+ __le16 lbatm;
};
enum {
@@ -1057,8 +1057,8 @@ struct nvme_write_zeroes_cmd {
__le16 control;
__le32 dsmgmt;
__le32 reftag;
- __le16 apptag;
- __le16 appmask;
+ __le16 lbat;
+ __le16 lbatm;
};
enum nvme_zone_mgmt_action {
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index 082841908fe7..c9e3843d2dd5 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -84,13 +84,3 @@ extern void gpmc_read_settings_dt(struct device_node *np,
struct gpmc_timings;
struct omap_nand_platform_data;
struct omap_onenand_platform_data;
-
-#if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2)
-extern int gpmc_onenand_init(struct omap_onenand_platform_data *d);
-#else
-#define board_onenand_data NULL
-static inline int gpmc_onenand_init(struct omap_onenand_platform_data *d)
-{
- return 0;
-}
-#endif
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 5769fe6e4950..1b3a76710487 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -66,8 +66,6 @@
* PG_referenced, PG_reclaim are used for page reclaim for anonymous and
* file-backed pagecache (see mm/vmscan.c).
*
- * PG_error is set to indicate that an I/O error occurred on this page.
- *
* PG_arch_1 is an architecture specific page state bit. The generic code
* guarantees that this bit is cleared for a page when it first is entered into
* the page cache.
@@ -103,22 +101,18 @@ enum pageflags {
PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
PG_active,
PG_workingset,
- PG_error,
- PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/
+ PG_owner_priv_1, /* Owner use. If pagecache, fs may use */
+ PG_owner_2, /* Owner use. If pagecache, fs may use */
PG_arch_1,
PG_reserved,
PG_private, /* If pagecache, has fs-private data */
PG_private_2, /* If pagecache, has fs aux data */
- PG_mappedtodisk, /* Has blocks allocated on-disk */
PG_reclaim, /* To be reclaimed asap */
PG_swapbacked, /* Page is backed by RAM/swap */
PG_unevictable, /* Page is "unevictable" */
#ifdef CONFIG_MMU
PG_mlocked, /* Page is vma mlocked */
#endif
-#ifdef CONFIG_ARCH_USES_PG_UNCACHED
- PG_uncached, /* Page has been mapped as uncached */
-#endif
#ifdef CONFIG_MEMORY_FAILURE
PG_hwpoison, /* hardware poisoned page. Don't touch */
#endif
@@ -126,14 +120,21 @@ enum pageflags {
PG_young,
PG_idle,
#endif
-#ifdef CONFIG_ARCH_USES_PG_ARCH_X
+#ifdef CONFIG_ARCH_USES_PG_ARCH_2
PG_arch_2,
+#endif
+#ifdef CONFIG_ARCH_USES_PG_ARCH_3
PG_arch_3,
#endif
__NR_PAGEFLAGS,
PG_readahead = PG_reclaim,
+ /* Anonymous memory (and shmem) */
+ PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */
+ /* Some filesystems */
+ PG_checked = PG_owner_priv_1,
+
/*
* Depending on the way an anonymous folio can be mapped into a page
* table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
@@ -141,13 +142,13 @@ enum pageflags {
* tail pages of an anonymous folio. For now, we only expect it to be
* set on tail pages for PTE-mapped THP.
*/
- PG_anon_exclusive = PG_mappedtodisk,
-
- /* Filesystems */
- PG_checked = PG_owner_priv_1,
+ PG_anon_exclusive = PG_owner_2,
- /* SwapBacked */
- PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */
+ /*
+ * Set if all buffer heads in the folio are mapped.
+ * Filesystems which do not use BHs can use it for their own purpose.
+ */
+ PG_mappedtodisk = PG_owner_2,
/* Two page bits are conscripted by FS-Cache to maintain local caching
* state. These bits are set on pages belonging to the netfs's inodes
@@ -183,8 +184,9 @@ enum pageflags {
*/
/* At least one page in this folio has the hwpoison flag set */
- PG_has_hwpoisoned = PG_error,
+ PG_has_hwpoisoned = PG_active,
PG_large_rmappable = PG_workingset, /* anon or file-backed */
+ PG_partially_mapped = PG_reclaim, /* was identified to be partially mapped */
};
#define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1)
@@ -235,7 +237,7 @@ static __always_inline int page_is_fake_head(const struct page *page)
return page_fixed_fake_head(page) != page;
}
-static inline unsigned long _compound_head(const struct page *page)
+static __always_inline unsigned long _compound_head(const struct page *page)
{
unsigned long head = READ_ONCE(page->compound_head);
@@ -506,7 +508,6 @@ static inline int TestClearPage##uname(struct page *page) { return 0; }
__PAGEFLAG(Locked, locked, PF_NO_TAIL)
FOLIO_FLAG(waiters, FOLIO_HEAD_PAGE)
-PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
FOLIO_FLAG(referenced, FOLIO_HEAD_PAGE)
FOLIO_TEST_CLEAR_FLAG(referenced, FOLIO_HEAD_PAGE)
__FOLIO_SET_FLAG(referenced, FOLIO_HEAD_PAGE)
@@ -514,8 +515,9 @@ PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
TESTCLEARFLAG(LRU, lru, PF_HEAD)
-PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
- TESTCLEARFLAG(Active, active, PF_HEAD)
+FOLIO_FLAG(active, FOLIO_HEAD_PAGE)
+ __FOLIO_CLEAR_FLAG(active, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_CLEAR_FLAG(active, FOLIO_HEAD_PAGE)
PAGEFLAG(Workingset, workingset, PF_HEAD)
TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND) /* Used by some filesystems */
@@ -531,9 +533,9 @@ PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
-PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
- __CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
- __SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
+FOLIO_FLAG(swapbacked, FOLIO_HEAD_PAGE)
+ __FOLIO_CLEAR_FLAG(swapbacked, FOLIO_HEAD_PAGE)
+ __FOLIO_SET_FLAG(swapbacked, FOLIO_HEAD_PAGE)
/*
* Private page markings that may be used by the filesystem that owns the page
@@ -542,8 +544,9 @@ PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
*/
PAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
-PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
- TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
+
+/* owner_2 can be set on tail pages for anon memory */
+FOLIO_FLAG(owner_2, FOLIO_HEAD_PAGE)
/*
* Only test-and-set exist for PG_writeback. The unconditional operators are
@@ -556,8 +559,8 @@ PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)
/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
-PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND)
- TESTCLEARFLAG(Readahead, readahead, PF_NO_COMPOUND)
+FOLIO_FLAG(readahead, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_CLEAR_FLAG(readahead, FOLIO_HEAD_PAGE)
#ifdef CONFIG_HIGHMEM
/*
@@ -577,34 +580,26 @@ static __always_inline bool folio_test_swapcache(const struct folio *folio)
test_bit(PG_swapcache, const_folio_flags(folio, 0));
}
-static __always_inline bool PageSwapCache(const struct page *page)
-{
- return folio_test_swapcache(page_folio(page));
-}
-
-SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
-CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
+FOLIO_SET_FLAG(swapcache, FOLIO_HEAD_PAGE)
+FOLIO_CLEAR_FLAG(swapcache, FOLIO_HEAD_PAGE)
#else
-PAGEFLAG_FALSE(SwapCache, swapcache)
+FOLIO_FLAG_FALSE(swapcache)
#endif
-PAGEFLAG(Unevictable, unevictable, PF_HEAD)
- __CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
- TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)
+FOLIO_FLAG(unevictable, FOLIO_HEAD_PAGE)
+ __FOLIO_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE)
#ifdef CONFIG_MMU
-PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
- __CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
- TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
-#else
-PAGEFLAG_FALSE(Mlocked, mlocked) __CLEARPAGEFLAG_NOOP(Mlocked, mlocked)
- TESTSCFLAG_FALSE(Mlocked, mlocked)
-#endif
-
-#ifdef CONFIG_ARCH_USES_PG_UNCACHED
-PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
+FOLIO_FLAG(mlocked, FOLIO_HEAD_PAGE)
+ __FOLIO_CLEAR_FLAG(mlocked, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_CLEAR_FLAG(mlocked, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_SET_FLAG(mlocked, FOLIO_HEAD_PAGE)
#else
-PAGEFLAG_FALSE(Uncached, uncached)
+FOLIO_FLAG_FALSE(mlocked)
+ __FOLIO_CLEAR_FLAG_NOOP(mlocked)
+ FOLIO_TEST_CLEAR_FLAG_FALSE(mlocked)
+ FOLIO_TEST_SET_FLAG_FALSE(mlocked)
#endif
#ifdef CONFIG_MEMORY_FAILURE
@@ -865,8 +860,18 @@ static inline void ClearPageCompound(struct page *page)
ClearPageHead(page);
}
FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE)
+FOLIO_TEST_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
+/*
+ * PG_partially_mapped is protected by deferred_split split_queue_lock,
+ * so it's safe to use non-atomic set/clear.
+ */
+__FOLIO_SET_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
+__FOLIO_CLEAR_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
#else
FOLIO_FLAG_FALSE(large_rmappable)
+FOLIO_TEST_FLAG_FALSE(partially_mapped)
+__FOLIO_SET_FLAG_NOOP(partially_mapped)
+__FOLIO_CLEAR_FLAG_NOOP(partially_mapped)
#endif
#define PG_head_mask ((1UL << PG_head))
@@ -927,79 +932,74 @@ PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#endif
/*
- * For pages that are never mapped to userspace,
- * page_type may be used. Because it is initialised to -1, we invert the
- * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
- * __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and
- * low bits so that an underflow or overflow of _mapcount won't be
- * mistaken for a page type value.
+ * For pages that do not use mapcount, page_type may be used.
+ * The low 24 bits of pagetype may be used for your own purposes, as long
+ * as you are careful to not affect the top 8 bits. The low bits of
+ * pagetype will be overwritten when you clear the page_type from the page.
*/
-
enum pagetype {
- PG_buddy = 0x40000000,
- PG_offline = 0x20000000,
- PG_table = 0x10000000,
- PG_guard = 0x08000000,
- PG_hugetlb = 0x04000000,
- PG_slab = 0x02000000,
- PG_zsmalloc = 0x01000000,
-
- PAGE_TYPE_BASE = 0x80000000,
-
- /*
- * Reserve 0xffff0000 - 0xfffffffe to catch _mapcount underflows and
- * allow owners that set a type to reuse the lower 16 bit for their own
- * purposes.
- */
- PAGE_MAPCOUNT_RESERVE = ~0x0000ffff,
+ /* 0x00-0x7f are positive numbers, ie mapcount */
+ /* Reserve 0x80-0xef for mapcount overflow. */
+ PGTY_buddy = 0xf0,
+ PGTY_offline = 0xf1,
+ PGTY_table = 0xf2,
+ PGTY_guard = 0xf3,
+ PGTY_hugetlb = 0xf4,
+ PGTY_slab = 0xf5,
+ PGTY_zsmalloc = 0xf6,
+ PGTY_unaccepted = 0xf7,
+
+ PGTY_mapcount_underflow = 0xff
};
-#define PageType(page, flag) \
- ((READ_ONCE(page->page_type) & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
-#define folio_test_type(folio, flag) \
- ((READ_ONCE(folio->page.page_type) & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
+static inline bool page_type_has_type(int page_type)
+{
+ return page_type < (PGTY_mapcount_underflow << 24);
+}
-static inline int page_type_has_type(unsigned int page_type)
+/* This takes a mapcount which is one more than page->_mapcount */
+static inline bool page_mapcount_is_type(unsigned int mapcount)
{
- return (int)page_type < PAGE_MAPCOUNT_RESERVE;
+ return page_type_has_type(mapcount - 1);
}
-static inline int page_has_type(const struct page *page)
+static inline bool page_has_type(const struct page *page)
{
- return page_type_has_type(READ_ONCE(page->page_type));
+ return page_mapcount_is_type(data_race(page->page_type));
}
#define FOLIO_TYPE_OPS(lname, fname) \
-static __always_inline bool folio_test_##fname(const struct folio *folio)\
+static __always_inline bool folio_test_##fname(const struct folio *folio) \
{ \
- return folio_test_type(folio, PG_##lname); \
+ return data_race(folio->page.page_type >> 24) == PGTY_##lname; \
} \
static __always_inline void __folio_set_##fname(struct folio *folio) \
{ \
- VM_BUG_ON_FOLIO(!folio_test_type(folio, 0), folio); \
- folio->page.page_type &= ~PG_##lname; \
+ VM_BUG_ON_FOLIO(data_race(folio->page.page_type) != UINT_MAX, \
+ folio); \
+ folio->page.page_type = (unsigned int)PGTY_##lname << 24; \
} \
static __always_inline void __folio_clear_##fname(struct folio *folio) \
{ \
VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio); \
- folio->page.page_type |= PG_##lname; \
+ folio->page.page_type = UINT_MAX; \
}
#define PAGE_TYPE_OPS(uname, lname, fname) \
FOLIO_TYPE_OPS(lname, fname) \
static __always_inline int Page##uname(const struct page *page) \
{ \
- return PageType(page, PG_##lname); \
+ return data_race(page->page_type >> 24) == PGTY_##lname; \
} \
static __always_inline void __SetPage##uname(struct page *page) \
{ \
- VM_BUG_ON_PAGE(!PageType(page, 0), page); \
- page->page_type &= ~PG_##lname; \
+ VM_BUG_ON_PAGE(data_race(page->page_type) != UINT_MAX, page); \
+ page->page_type = (unsigned int)PGTY_##lname << 24; \
} \
static __always_inline void __ClearPage##uname(struct page *page) \
{ \
VM_BUG_ON_PAGE(!Page##uname(page), page); \
- page->page_type |= PG_##lname; \
+ page->page_type = UINT_MAX; \
}
/*
@@ -1076,6 +1076,13 @@ FOLIO_TEST_FLAG_FALSE(hugetlb)
PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)
+/*
+ * Mark pages that have to be accepted before they are touched for the first time.
+ *
+ * Serialized with zone lock.
+ */
+PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)
+
/**
* PageHuge - Determine if the page belongs to hugetlbfs
* @page: The page to test.
@@ -1175,25 +1182,20 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page)
*/
#define PAGE_FLAGS_SECOND \
(0xffUL /* order */ | 1UL << PG_has_hwpoisoned | \
- 1UL << PG_large_rmappable)
+ 1UL << PG_large_rmappable | 1UL << PG_partially_mapped)
#define PAGE_FLAGS_PRIVATE \
(1UL << PG_private | 1UL << PG_private_2)
/**
- * page_has_private - Determine if page has private stuff
- * @page: The page to be checked
+ * folio_has_private - Determine if folio has private stuff
+ * @folio: The folio to be checked
*
- * Determine if a page has private stuff, indicating that release routines
+ * Determine if a folio has private stuff, indicating that release routines
* should be invoked upon it.
*/
-static inline int page_has_private(const struct page *page)
-{
- return !!(page->flags & PAGE_FLAGS_PRIVATE);
-}
-
-static inline bool folio_has_private(const struct folio *folio)
+static inline int folio_has_private(const struct folio *folio)
{
- return page_has_private(&folio->page);
+ return !!(folio->flags & PAGE_FLAGS_PRIVATE);
}
#undef PF_ANY
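
A hedged illustration of the new page_type encoding described above: the top byte carries the type, the low 24 bits remain free for the type's owner, and UINT_MAX means no type is set. The helper name is hypothetical:

/* e.g. a buddy page has page_type == 0xf0000000 | <owner bits> */
static bool example_is_buddy(unsigned int page_type)
{
	return (page_type >> 24) == PGTY_buddy;
}
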
diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h
index 904c52f97284..79dbd8bc35a7 100644
--- a/include/linux/page_counter.h
+++ b/include/linux/page_counter.h
@@ -26,11 +26,14 @@ struct page_counter {
atomic_long_t children_low_usage;
unsigned long watermark;
+ /* Latest cg2 reset watermark */
+ unsigned long local_watermark;
unsigned long failcnt;
	/* Keep all the read-mostly fields in a separate cacheline. */
CACHELINE_PADDING(_pad2_);
+ bool protection_support;
unsigned long min;
unsigned long low;
unsigned long high;
@@ -44,12 +47,17 @@ struct page_counter {
#define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE)
#endif
+/*
+ * Protection is supported only for the first counter (with id 0).
+ */
static inline void page_counter_init(struct page_counter *counter,
- struct page_counter *parent)
+ struct page_counter *parent,
+ bool protection_support)
{
- atomic_long_set(&counter->usage, 0);
+ counter->usage = (atomic_long_t)ATOMIC_LONG_INIT(0);
counter->max = PAGE_COUNTER_MAX;
counter->parent = parent;
+ counter->protection_support = protection_support;
}
static inline unsigned long page_counter_read(struct page_counter *counter)
@@ -78,11 +86,24 @@ int page_counter_memparse(const char *buf, const char *max,
static inline void page_counter_reset_watermark(struct page_counter *counter)
{
- counter->watermark = page_counter_read(counter);
+ unsigned long usage = page_counter_read(counter);
+
+ /*
+ * Update local_watermark first, so it's always <= watermark
+ * (modulo CPU/compiler re-ordering)
+ */
+ counter->local_watermark = usage;
+ counter->watermark = usage;
}
+#ifdef CONFIG_MEMCG
void page_counter_calculate_protection(struct page_counter *root,
struct page_counter *counter,
bool recursive_protection);
+#else
+static inline void page_counter_calculate_protection(struct page_counter *root,
+ struct page_counter *counter,
+ bool recursive_protection) {}
+#endif
#endif /* _LINUX_PAGE_COUNTER_H */
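
A hedged sketch of the new three-argument page_counter_init(), where only the first (memory) counter enables min/low protection; the function and parameter names are illustrative:

static void example_counters_init(struct page_counter *memory,
				  struct page_counter *memory_parent,
				  struct page_counter *swap,
				  struct page_counter *swap_parent)
{
	page_counter_init(memory, memory_parent, true);	/* protection supported */
	page_counter_init(swap, swap_parent, false);	/* no min/low for swap */
}
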
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index d9c7edb6422b..68a5f1ff3301 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -32,6 +32,8 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
pgoff_t start, pgoff_t end);
int kiocb_invalidate_pages(struct kiocb *iocb, size_t count);
void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count);
+int filemap_invalidate_pages(struct address_space *mapping,
+ loff_t pos, loff_t end, bool nowait);
int write_inode_now(struct inode *, int sync);
int filemap_fdatawrite(struct address_space *);
@@ -204,14 +206,21 @@ enum mapping_flags {
AS_EXITING = 4, /* final truncate in progress */
/* writeback related tags are not used */
AS_NO_WRITEBACK_TAGS = 5,
- AS_LARGE_FOLIO_SUPPORT = 6,
- AS_RELEASE_ALWAYS, /* Call ->release_folio(), even if no private data */
- AS_STABLE_WRITES, /* must wait for writeback before modifying
+ AS_RELEASE_ALWAYS = 6, /* Call ->release_folio(), even if no private data */
+ AS_STABLE_WRITES = 7, /* must wait for writeback before modifying
folio contents */
- AS_INACCESSIBLE, /* Do not attempt direct R/W access to the mapping,
- including to move the mapping */
+ AS_INACCESSIBLE = 8, /* Do not attempt direct R/W access to the mapping */
+ /* Bits 16-25 are used for FOLIO_ORDER */
+ AS_FOLIO_ORDER_BITS = 5,
+ AS_FOLIO_ORDER_MIN = 16,
+ AS_FOLIO_ORDER_MAX = AS_FOLIO_ORDER_MIN + AS_FOLIO_ORDER_BITS,
};
+#define AS_FOLIO_ORDER_BITS_MASK ((1u << AS_FOLIO_ORDER_BITS) - 1)
+#define AS_FOLIO_ORDER_MIN_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MIN)
+#define AS_FOLIO_ORDER_MAX_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MAX)
+#define AS_FOLIO_ORDER_MASK (AS_FOLIO_ORDER_MIN_MASK | AS_FOLIO_ORDER_MAX_MASK)
+
/**
* mapping_set_error - record a writeback error in the address_space
* @mapping: the mapping in which an error should be set
@@ -367,9 +376,64 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
#define MAX_XAS_ORDER (XA_CHUNK_SHIFT * 2 - 1)
#define MAX_PAGECACHE_ORDER min(MAX_XAS_ORDER, PREFERRED_MAX_PAGECACHE_ORDER)
+/*
+ * mapping_max_folio_size_supported() - Check the max folio size supported
+ *
+ * The filesystem should call this function at mount time if there is a
+ * requirement on the folio mapping size in the page cache.
+ */
+static inline size_t mapping_max_folio_size_supported(void)
+{
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ return 1U << (PAGE_SHIFT + MAX_PAGECACHE_ORDER);
+ return PAGE_SIZE;
+}
+
+/*
+ * mapping_set_folio_order_range() - Set the orders supported by a file.
+ * @mapping: The address space of the file.
+ * @min: Minimum folio order (between 0-MAX_PAGECACHE_ORDER inclusive).
+ * @max: Maximum folio order (between @min-MAX_PAGECACHE_ORDER inclusive).
+ *
+ * The filesystem should call this function in its inode constructor to
+ * indicate which base size (min) and maximum size (max) of folio the VFS
+ * can use to cache the contents of the file. This should only be used
+ * if the filesystem needs special handling of folio sizes (ie there is
+ * something the core cannot know).
+ * Do not tune it based on, eg, i_size.
+ *
+ * Context: This should not be called while the inode is active as it
+ * is non-atomic.
+ */
+static inline void mapping_set_folio_order_range(struct address_space *mapping,
+ unsigned int min,
+ unsigned int max)
+{
+ if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ return;
+
+ if (min > MAX_PAGECACHE_ORDER)
+ min = MAX_PAGECACHE_ORDER;
+
+ if (max > MAX_PAGECACHE_ORDER)
+ max = MAX_PAGECACHE_ORDER;
+
+ if (max < min)
+ max = min;
+
+ mapping->flags = (mapping->flags & ~AS_FOLIO_ORDER_MASK) |
+ (min << AS_FOLIO_ORDER_MIN) | (max << AS_FOLIO_ORDER_MAX);
+}
+
+static inline void mapping_set_folio_min_order(struct address_space *mapping,
+ unsigned int min)
+{
+ mapping_set_folio_order_range(mapping, min, MAX_PAGECACHE_ORDER);
+}
+
/**
* mapping_set_large_folios() - Indicate the file supports large folios.
- * @mapping: The file.
+ * @mapping: The address space of the file.
*
* The filesystem should call this function in its inode constructor to
* indicate that the VFS can use large folios to cache the contents of
@@ -380,7 +444,44 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
*/
static inline void mapping_set_large_folios(struct address_space *mapping)
{
- __set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
+ mapping_set_folio_order_range(mapping, 0, MAX_PAGECACHE_ORDER);
+}
+
+static inline unsigned int
+mapping_max_folio_order(const struct address_space *mapping)
+{
+ if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ return 0;
+ return (mapping->flags & AS_FOLIO_ORDER_MAX_MASK) >> AS_FOLIO_ORDER_MAX;
+}
+
+static inline unsigned int
+mapping_min_folio_order(const struct address_space *mapping)
+{
+ if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ return 0;
+ return (mapping->flags & AS_FOLIO_ORDER_MIN_MASK) >> AS_FOLIO_ORDER_MIN;
+}
+
+static inline unsigned long
+mapping_min_folio_nrpages(struct address_space *mapping)
+{
+ return 1UL << mapping_min_folio_order(mapping);
+}
+
+/**
+ * mapping_align_index() - Align index for this mapping.
+ * @mapping: The address_space.
+ * @index: The page index.
+ *
+ * The index of a folio must be naturally aligned. If you are adding a
+ * new folio to the page cache and need to know what index to give it,
+ * call this function.
+ */
+static inline pgoff_t mapping_align_index(struct address_space *mapping,
+ pgoff_t index)
+{
+ return round_down(index, mapping_min_folio_nrpages(mapping));
}
/*
@@ -389,20 +490,17 @@ static inline void mapping_set_large_folios(struct address_space *mapping)
*/
static inline bool mapping_large_folio_support(struct address_space *mapping)
{
- /* AS_LARGE_FOLIO_SUPPORT is only reasonable for pagecache folios */
+ /* AS_FOLIO_ORDER is only reasonable for pagecache folios */
VM_WARN_ONCE((unsigned long)mapping & PAGE_MAPPING_ANON,
"Anonymous mapping always supports large folio");
- return IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
- test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
+ return mapping_max_folio_order(mapping) > 0;
}
/* Return the maximum folio size for this pagecache mapping, in bytes. */
-static inline size_t mapping_max_folio_size(struct address_space *mapping)
+static inline size_t mapping_max_folio_size(const struct address_space *mapping)
{
- if (mapping_large_folio_support(mapping))
- return PAGE_SIZE << MAX_PAGECACHE_ORDER;
- return PAGE_SIZE;
+ return PAGE_SIZE << mapping_max_folio_order(mapping);
}
static inline int filemap_nr_thps(struct address_space *mapping)
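Editorial sketch (not part of the patch): how a filesystem whose block size can exceed PAGE_SIZE might use the new min-order helpers at inode-init time, and align indices before inserting folios. "myfs" and the blkbits parameter are hypothetical; assumes <linux/pagemap.h> is included.

static int myfs_setup_inode_mapping(struct inode *inode, unsigned int blkbits)
{
	unsigned int min_order = blkbits > PAGE_SHIFT ? blkbits - PAGE_SHIFT : 0;

	/* Refuse at mount/inode-init time if one block cannot fit in a folio. */
	if ((1UL << blkbits) > mapping_max_folio_size_supported())
		return -EINVAL;

	/* Every folio in this mapping will cover at least one fs block. */
	mapping_set_folio_min_order(inode->i_mapping, min_order);
	return 0;
}

static pgoff_t myfs_new_folio_index(struct address_space *mapping, pgoff_t index)
{
	/* Folio indices must be naturally aligned to the minimum order. */
	return mapping_align_index(mapping, index);
}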
diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
index 27cd1e59ccf7..f5eb5a32aeed 100644
--- a/include/linux/pagewalk.h
+++ b/include/linux/pagewalk.h
@@ -130,4 +130,62 @@ int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
pgoff_t nr, const struct mm_walk_ops *ops,
void *private);
+typedef int __bitwise folio_walk_flags_t;
+
+/*
+ * Walk migration entries as well. Careful: a large folio might get split
+ * concurrently.
+ */
+#define FW_MIGRATION ((__force folio_walk_flags_t)BIT(0))
+
+/* Walk shared zeropages (small + huge) as well. */
+#define FW_ZEROPAGE ((__force folio_walk_flags_t)BIT(1))
+
+enum folio_walk_level {
+ FW_LEVEL_PTE,
+ FW_LEVEL_PMD,
+ FW_LEVEL_PUD,
+};
+
+/**
+ * struct folio_walk - folio_walk_start() / folio_walk_end() data
+ * @page: exact folio page referenced (if applicable)
+ * @level: page table level identifying the entry type
+ * @ptep: pointer to the page table entry (FW_LEVEL_PTE).
+ * @pmdp: pointer to the page table entry (FW_LEVEL_PMD).
+ * @pudp: pointer to the page table entry (FW_LEVEL_PUD).
+ * @ptl: pointer to the page table lock.
+ *
+ * (see folio_walk_start() documentation for more details)
+ */
+struct folio_walk {
+ /* public */
+ struct page *page;
+ enum folio_walk_level level;
+ union {
+ pte_t *ptep;
+ pud_t *pudp;
+ pmd_t *pmdp;
+ };
+ union {
+ pte_t pte;
+ pud_t pud;
+ pmd_t pmd;
+ };
+ /* private */
+ struct vm_area_struct *vma;
+ spinlock_t *ptl;
+};
+
+struct folio *folio_walk_start(struct folio_walk *fw,
+ struct vm_area_struct *vma, unsigned long addr,
+ folio_walk_flags_t flags);
+
+#define folio_walk_end(__fw, __vma) do { \
+ spin_unlock((__fw)->ptl); \
+ if (likely((__fw)->level == FW_LEVEL_PTE)) \
+ pte_unmap((__fw)->ptep); \
+ vma_pgtable_walk_end(__vma); \
+} while (0)
+
#endif /* _LINUX_PAGEWALK_H */
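Editorial sketch (not part of the patch): a typical folio_walk_start()/folio_walk_end() pairing that takes a reference on the folio mapped at an address. Assumes the caller holds the mmap lock (or VMA lock) in read mode, since the walk inspects @vma's page tables.

static struct folio *get_mapped_folio(struct vm_area_struct *vma,
				      unsigned long addr)
{
	struct folio_walk fw;
	struct folio *folio;

	folio = folio_walk_start(&fw, vma, addr, 0);
	if (!folio)
		return NULL;

	/* Grab our own reference before dropping the page table lock. */
	folio_get(folio);
	folio_walk_end(&fw, vma);
	return folio;
}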
diff --git a/include/linux/path.h b/include/linux/path.h
index ca073e70decd..7ea389dc764b 100644
--- a/include/linux/path.h
+++ b/include/linux/path.h
@@ -18,12 +18,6 @@ static inline int path_equal(const struct path *path1, const struct path *path2)
return path1->mnt == path2->mnt && path1->dentry == path2->dentry;
}
-static inline void path_put_init(struct path *path)
-{
- path_put(path);
- *path = (struct path) { };
-}
-
/*
* Cleanup macro for use with __free(path_put). Avoids dereference and
* copying @path unlike DEFINE_FREE(). path_put() will handle the empty
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index df54cd5b15db..0e8b74e63767 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -8,6 +8,7 @@
/* Address Translation Service */
bool pci_ats_supported(struct pci_dev *dev);
int pci_enable_ats(struct pci_dev *dev, int ps);
+int pci_prepare_ats(struct pci_dev *dev, int ps);
void pci_disable_ats(struct pci_dev *dev);
int pci_ats_queue_depth(struct pci_dev *dev);
int pci_ats_page_aligned(struct pci_dev *dev);
@@ -16,6 +17,8 @@ static inline bool pci_ats_supported(struct pci_dev *d)
{ return false; }
static inline int pci_enable_ats(struct pci_dev *d, int ps)
{ return -ENODEV; }
+static inline int pci_prepare_ats(struct pci_dev *dev, int ps)
+{ return -ENODEV; }
static inline void pci_disable_ats(struct pci_dev *d) { }
static inline int pci_ats_queue_depth(struct pci_dev *d)
{ return -ENODEV; }
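Editorial sketch (not part of the patch), assuming pci_prepare_ats() performs the sleeping configuration (e.g. programming the smallest translation unit) up front so that enabling can happen later; "mydrv" is hypothetical and the page-shift value is only an example.

static int mydrv_setup_ats(struct pci_dev *pdev)
{
	int ret;

	if (!pci_ats_supported(pdev))
		return -ENODEV;

	/* Do the preparatory configuration during probe. */
	ret = pci_prepare_ats(pdev, PAGE_SHIFT);
	if (ret)
		return ret;

	/* ... later, once the IOMMU side is ready, actually enable ATS ... */
	return pci_enable_ats(pdev, PAGE_SHIFT);
}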
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index 85bdf2adb760..42ef06136bd1 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -128,6 +128,7 @@ struct pci_epc_mem {
* @group: configfs group representing the PCI EPC device
* @lock: mutex to protect pci_epc ops
* @function_num_map: bitmap to manage physical function number
+ * @domain_nr: PCI domain number of the endpoint controller
* @init_complete: flag to indicate whether the EPC initialization is complete
* or not
*/
@@ -145,10 +146,12 @@ struct pci_epc {
/* mutex to protect against concurrent access of EP controller */
struct mutex lock;
unsigned long function_num_map;
+ int domain_nr;
bool init_complete;
};
/**
+ * enum pci_epc_bar_type - configurability of endpoint BAR
* @BAR_PROGRAMMABLE: The BAR mask can be configured by the EPC.
* @BAR_FIXED: The BAR mask is fixed by the hardware.
* @BAR_RESERVED: The BAR should not be touched by an EPF driver.
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 4cf89a4b4cbc..573b4c4c2be6 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -371,6 +371,7 @@ struct pci_dev {
can be generated */
unsigned int pme_poll:1; /* Poll device's PME status bit */
unsigned int pinned:1; /* Whether this dev is pinned */
+ unsigned int config_rrs_sv:1; /* Config RRS software visibility */
unsigned int imm_ready:1; /* Supports Immediate Readiness */
unsigned int d1_support:1; /* Low power state D1 is supported */
unsigned int d2_support:1; /* Low power state D2 is supported */
@@ -517,6 +518,9 @@ struct pci_dev {
#ifdef CONFIG_PCI_DOE
struct xarray doe_mbs; /* Data Object Exchange mailboxes */
#endif
+#ifdef CONFIG_PCI_NPEM
+ struct npem *npem; /* Native PCIe Enclosure Management */
+#endif
u16 acs_cap; /* ACS Capability offset */
phys_addr_t rom; /* Physical address if not from BAR */
size_t romlen; /* Length if not from BAR */
@@ -1098,7 +1102,7 @@ enum pcie_bus_config_types {
extern enum pcie_bus_config_types pcie_bus_config;
-extern struct bus_type pci_bus_type;
+extern const struct bus_type pci_bus_type;
/* Do NOT directly access these two variables, unless you are arch-specific PCI
* code, or PCI core code. */
@@ -1884,7 +1888,7 @@ static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
{ return 0; }
#endif
int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
-void pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent);
+void pci_bus_release_domain_nr(struct device *parent, int domain_nr);
#endif
/* Some architectures require additional setup to direct VGA traffic */
@@ -2290,8 +2294,11 @@ static inline void pci_fixup_device(enum pci_fixup_pass pass,
#endif
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
+void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
+ const char *name);
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev);
+int pcim_request_region(struct pci_dev *pdev, int bar, const char *name);
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
const char *name);
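Editorial sketch (not part of the patch): managed request-plus-map of a single BAR with the new helper; "mydrv" is hypothetical and pcim_iomap_region() is assumed to return an ERR_PTR on failure. Both the region and the mapping are released automatically on driver detach.

static int mydrv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	/* Request and ioremap BAR 0 in one managed call. */
	regs = pcim_iomap_region(pdev, 0, "mydrv");
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	/* regs is now usable with readl()/writel(). */
	return 0;
}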
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index e388c8b1cbc2..4cf6aaed5f35 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -580,6 +580,7 @@
#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F3 0x12fb
#define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3 0x12c3
#define PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3 0x16fb
+#define PCI_DEVICE_ID_AMD_1AH_M60H_DF_F3 0x124b
#define PCI_DEVICE_ID_AMD_1AH_M70H_DF_F3 0x12bb
#define PCI_DEVICE_ID_AMD_MI200_DF_F3 0x14d3
#define PCI_DEVICE_ID_AMD_MI300_DF_F3 0x152b
@@ -2661,6 +2662,8 @@
#define PCI_DEVICE_ID_DCI_PCCOM8 0x0002
#define PCI_DEVICE_ID_DCI_PCCOM2 0x0004
+#define PCI_VENDOR_ID_GLENFLY 0x6766
+
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL_EESSC 0x0008
#define PCI_DEVICE_ID_INTEL_HDA_CML_LP 0x02c8
@@ -2706,6 +2709,9 @@
#define PCI_DEVICE_ID_INTEL_82815_MC 0x1130
#define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132
#define PCI_DEVICE_ID_INTEL_SST_TNG 0x119a
+#define PCI_DEVICE_ID_INTEL_DSA_GNRD 0x11fb
+#define PCI_DEVICE_ID_INTEL_DSA_DMR 0x1212
+#define PCI_DEVICE_ID_INTEL_IAA_DMR 0x1216
#define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221
#define PCI_DEVICE_ID_INTEL_82437 0x122d
#define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 36b942b67b7d..c012df33a9f0 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -145,7 +145,7 @@ extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
#define percpu_rwsem_assert_held(sem) lockdep_assert_held(sem)
static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
- bool read, unsigned long ip)
+ unsigned long ip)
{
lock_release(&sem->dep_map, ip);
}
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 4b2047b78b67..b6321fc49159 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -135,7 +135,6 @@ extern void __init setup_per_cpu_areas(void);
extern void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved,
gfp_t gfp) __alloc_size(1);
-extern size_t pcpu_alloc_size(void __percpu *__pdata);
#define __alloc_percpu_gfp(_size, _align, _gfp) \
alloc_hooks(pcpu_alloc_noprof(_size, _align, false, _gfp))
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index b3b34f6670cf..4b5b83677e3f 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -17,10 +17,14 @@
#ifdef CONFIG_ARM_PMU
/*
- * The ARMv7 CPU PMU supports up to 32 event counters.
+ * The Armv7 and Armv8.8 or less CPU PMU supports up to 32 event counters.
+ * The Armv8.9/9.4 CPU PMU supports up to 33 event counters.
*/
+#ifdef CONFIG_ARM
#define ARMPMU_MAX_HWEVENTS 32
-
+#else
+#define ARMPMU_MAX_HWEVENTS 33
+#endif
/*
* ARM PMU hw_event flags
*/
@@ -96,7 +100,7 @@ struct arm_pmu {
void (*stop)(struct arm_pmu *);
void (*reset)(void *);
int (*map_event)(struct perf_event *event);
- int num_events;
+ DECLARE_BITMAP(cntr_mask, ARMPMU_MAX_HWEVENTS);
bool secure_access; /* 32-bit ARM only */
#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
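Editorial sketch (not part of the patch): with num_events replaced by a counter bitmap, PMU code is expected to iterate the implemented counters rather than looping over 0..num_events; the helper name below is hypothetical.

static void mypmu_reset_counters(struct arm_pmu *cpu_pmu)
{
	int idx;

	for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMPMU_MAX_HWEVENTS) {
		/* ... reset/clear the hardware counter at index @idx ... */
	}
}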
diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h
index 7867db04ec98..3372c1b56486 100644
--- a/include/linux/perf/arm_pmuv3.h
+++ b/include/linux/perf/arm_pmuv3.h
@@ -6,8 +6,9 @@
#ifndef __PERF_ARM_PMUV3_H
#define __PERF_ARM_PMUV3_H
-#define ARMV8_PMU_MAX_COUNTERS 32
-#define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1)
+#define ARMV8_PMU_MAX_GENERAL_COUNTERS 31
+#define ARMV8_PMU_CYCLE_IDX 31
+#define ARMV8_PMU_INSTR_IDX 32 /* Not accessible from AArch32 */
/*
* Common architectural and microarchitectural event numbers.
@@ -227,8 +228,10 @@
*/
#define ARMV8_PMU_OVSR_P GENMASK(30, 0)
#define ARMV8_PMU_OVSR_C BIT(31)
+#define ARMV8_PMU_OVSR_F BIT_ULL(32) /* arm64 only */
/* Mask for writable bits is both P and C fields */
-#define ARMV8_PMU_OVERFLOWED_MASK (ARMV8_PMU_OVSR_P | ARMV8_PMU_OVSR_C)
+#define ARMV8_PMU_OVERFLOWED_MASK (ARMV8_PMU_OVSR_P | ARMV8_PMU_OVSR_C | \
+ ARMV8_PMU_OVSR_F)
/*
* PMXEVTYPER: Event selection reg
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1a8942277dda..fb908843f209 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -168,6 +168,9 @@ struct hw_perf_event {
struct hw_perf_event_extra extra_reg;
struct hw_perf_event_extra branch_reg;
};
+ struct { /* aux / Intel-PT */
+ u64 aux_config;
+ };
struct { /* software */
struct hrtimer hrtimer;
};
@@ -292,6 +295,19 @@ struct perf_event_pmu_context;
#define PERF_PMU_CAP_AUX_OUTPUT 0x0080
#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100
+/**
+ * pmu::scope
+ */
+enum perf_pmu_scope {
+ PERF_PMU_SCOPE_NONE = 0,
+ PERF_PMU_SCOPE_CORE,
+ PERF_PMU_SCOPE_DIE,
+ PERF_PMU_SCOPE_CLUSTER,
+ PERF_PMU_SCOPE_PKG,
+ PERF_PMU_SCOPE_SYS_WIDE,
+ PERF_PMU_MAX_SCOPE,
+};
+
struct perf_output_handle;
#define PMU_NULL_DEV ((void *)(~0UL))
@@ -315,6 +331,11 @@ struct pmu {
*/
int capabilities;
+ /*
+ * PMU scope
+ */
+ unsigned int scope;
+
int __percpu *pmu_disable_count;
struct perf_cpu_pmu_context __percpu *cpu_pmu_context;
atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */
@@ -615,10 +636,13 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *,
* PERF_EV_CAP_SIBLING: An event with this flag must be a group sibling and
* cannot be a group leader. If an event with this flag is detached from the
* group it is scheduled out and moved into an unrecoverable ERROR state.
+ * PERF_EV_CAP_READ_SCOPE: A CPU event that can be read from any CPU of the
+ * PMU scope where it is active.
*/
#define PERF_EV_CAP_SOFTWARE BIT(0)
#define PERF_EV_CAP_READ_ACTIVE_PKG BIT(1)
#define PERF_EV_CAP_SIBLING BIT(2)
+#define PERF_EV_CAP_READ_SCOPE BIT(3)
#define SWEVENT_HLIST_BITS 8
#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)
@@ -963,12 +987,16 @@ struct perf_event_context {
struct rcu_head rcu_head;
/*
- * Sum (event->pending_work + event->pending_work)
+ * The count of events for which using the switch-out fast path
+ * should be avoided.
+ *
+ * Sum (event->pending_work + events with
+ * (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ)))
*
* The SIGTRAP is targeted at ctx->task, as such it won't do changing
* that until the signal is delivered.
*/
- local_t nr_pending;
+ local_t nr_no_switch_fast;
};
struct perf_cpu_pmu_context {
@@ -1602,13 +1630,7 @@ static inline int perf_is_paranoid(void)
return sysctl_perf_event_paranoid > -1;
}
-static inline int perf_allow_kernel(struct perf_event_attr *attr)
-{
- if (sysctl_perf_event_paranoid > 1 && !perfmon_capable())
- return -EACCES;
-
- return security_perf_event_open(attr, PERF_SECURITY_KERNEL);
-}
+int perf_allow_kernel(struct perf_event_attr *attr);
static inline int perf_allow_cpu(struct perf_event_attr *attr)
{
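Editorial sketch (not part of the patch): an uncore-style PMU that is shared by every CPU in a package would advertise the new scope at registration time so the core can pick a suitable owning CPU; the structure name and omitted callbacks are hypothetical.

static struct pmu my_uncore_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.scope		= PERF_PMU_SCOPE_PKG,	/* shared package-wide */
	/* .event_init, .add, .del, .start, .stop, .read, ... */
};

/* registered as usual, e.g. perf_pmu_register(&my_uncore_pmu, "my_uncore", -1); */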
diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h
index 207f0c83c8e9..59a3deb792a8 100644
--- a/include/linux/pgalloc_tag.h
+++ b/include/linux/pgalloc_tag.h
@@ -80,36 +80,6 @@ static inline void pgalloc_tag_sub(struct page *page, unsigned int nr)
}
}
-static inline void pgalloc_tag_split(struct page *page, unsigned int nr)
-{
- int i;
- struct page_ext *first_page_ext;
- struct page_ext *page_ext;
- union codetag_ref *ref;
- struct alloc_tag *tag;
-
- if (!mem_alloc_profiling_enabled())
- return;
-
- first_page_ext = page_ext = page_ext_get(page);
- if (unlikely(!page_ext))
- return;
-
- ref = codetag_ref_from_page_ext(page_ext);
- if (!ref->ct)
- goto out;
-
- tag = ct_to_alloc_tag(ref->ct);
- page_ext = page_ext_next(page_ext);
- for (i = 1; i < nr; i++) {
- /* Set new reference to point to the original tag */
- alloc_tag_ref_set(codetag_ref_from_page_ext(page_ext), tag);
- page_ext = page_ext_next(page_ext);
- }
-out:
- page_ext_put(first_page_ext);
-}
-
static inline struct alloc_tag *pgalloc_tag_get(struct page *page)
{
struct alloc_tag *tag = NULL;
@@ -142,7 +112,6 @@ static inline void clear_page_tag_ref(struct page *page) {}
static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
unsigned int nr) {}
static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {}
-static inline void pgalloc_tag_split(struct page *page, unsigned int nr) {}
static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL; }
static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {}
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 2a6a3cccfc36..e8b2ac6bd2ae 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -447,6 +447,12 @@ static inline void arch_check_zapped_pmd(struct vm_area_struct *vma,
}
#endif
+#ifndef arch_check_zapped_pud
+static inline void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud)
+{
+}
+#endif
+
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long address,
@@ -1950,6 +1956,18 @@ typedef unsigned int pgtbl_mod_mask;
#define MAX_PTRS_PER_P4D PTRS_PER_P4D
#endif
+#ifndef pte_pgprot
+#define pte_pgprot(x) ((pgprot_t) {0})
+#endif
+
+#ifndef pmd_pgprot
+#define pmd_pgprot(x) ((pgprot_t) {0})
+#endif
+
+#ifndef pud_pgprot
+#define pud_pgprot(x) ((pgprot_t) {0})
+#endif
+
/* description of effects of mapping type and prot in current implementation.
* this is due to the limited x86 page protection hardware. The expected
* behavior is in parens:
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index a65d3d078e58..53cfde98433d 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -81,6 +81,8 @@ struct pinctrl_map;
* @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin.
* If the argument != 0, schmitt-trigger mode is enabled. If it's 0,
* schmitt-trigger mode is disabled.
+ * @PIN_CONFIG_INPUT_SCHMITT_UV: this will configure an input pin to run in
+ * schmitt-trigger mode. The argument is in uV.
* @PIN_CONFIG_MODE_LOW_POWER: this will configure the pin for low power
* operation, if several modes of operation are supported these can be
* passed in the argument on a custom form, else just use argument 1
@@ -132,6 +134,7 @@ enum pin_config_param {
PIN_CONFIG_INPUT_ENABLE,
PIN_CONFIG_INPUT_SCHMITT,
PIN_CONFIG_INPUT_SCHMITT_ENABLE,
+ PIN_CONFIG_INPUT_SCHMITT_UV,
PIN_CONFIG_MODE_LOW_POWER,
PIN_CONFIG_MODE_PWM,
PIN_CONFIG_OUTPUT,
diff --git a/include/linux/platform_data/ad5449.h b/include/linux/platform_data/ad5449.h
deleted file mode 100644
index d687ef5726c2..000000000000
--- a/include/linux/platform_data/ad5449.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * AD5415, AD5426, AD5429, AD5432, AD5439, AD5443, AD5449 Digital to Analog
- * Converter driver.
- *
- * Copyright 2012 Analog Devices Inc.
- * Author: Lars-Peter Clausen <lars@metafoo.de>
- */
-
-#ifndef __LINUX_PLATFORM_DATA_AD5449_H__
-#define __LINUX_PLATFORM_DATA_AD5449_H__
-
-/**
- * enum ad5449_sdo_mode - AD5449 SDO pin configuration
- * @AD5449_SDO_DRIVE_FULL: Drive the SDO pin with full strength.
- * @AD5449_SDO_DRIVE_WEAK: Drive the SDO pin with not full strength.
- * @AD5449_SDO_OPEN_DRAIN: Operate the SDO pin in open-drain mode.
- * @AD5449_SDO_DISABLED: Disable the SDO pin, in this mode it is not possible to
- * read back from the device.
- */
-enum ad5449_sdo_mode {
- AD5449_SDO_DRIVE_FULL = 0x0,
- AD5449_SDO_DRIVE_WEAK = 0x1,
- AD5449_SDO_OPEN_DRAIN = 0x2,
- AD5449_SDO_DISABLED = 0x3,
-};
-
-/**
- * struct ad5449_platform_data - Platform data for the ad5449 DAC driver
- * @sdo_mode: SDO pin mode
- * @hardware_clear_to_midscale: Whether asserting the hardware CLR pin sets the
- * outputs to midscale (true) or to zero scale(false).
- */
-struct ad5449_platform_data {
- enum ad5449_sdo_mode sdo_mode;
- bool hardware_clear_to_midscale;
-};
-
-#endif
diff --git a/include/linux/platform_data/amd_qdma.h b/include/linux/platform_data/amd_qdma.h
new file mode 100644
index 000000000000..576d952f97ed
--- /dev/null
+++ b/include/linux/platform_data/amd_qdma.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _PLATDATA_AMD_QDMA_H
+#define _PLATDATA_AMD_QDMA_H
+
+#include <linux/dmaengine.h>
+
+/**
+ * struct qdma_queue_info - DMA queue information. This information is used to
+ * match queue when DMA channel is requested
+ * @dir: Channel transfer direction
+ */
+struct qdma_queue_info {
+ enum dma_transfer_direction dir;
+};
+
+#define QDMA_FILTER_PARAM(qinfo) ((void *)(qinfo))
+
+struct dma_slave_map;
+
+/**
+ * struct qdma_platdata - Platform specific data for QDMA engine
+ * @max_mm_channels: Maximum number of MM DMA channels in each direction
+ * @device_map: DMA slave map
+ * @irq_index: The index of first IRQ
+ */
+struct qdma_platdata {
+ u32 max_mm_channels;
+ u32 irq_index;
+ struct dma_slave_map *device_map;
+};
+
+#endif /* _PLATDATA_AMD_QDMA_H */
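Editorial sketch (not part of the patch): how platform code might describe a QDMA channel through the slave map, passing a struct qdma_queue_info via QDMA_FILTER_PARAM(). Device and channel names are hypothetical.

static struct qdma_queue_info qdma_c2h = {
	.dir = DMA_DEV_TO_MEM,		/* card-to-host queue */
};

static struct dma_slave_map my_qdma_map[] = {
	{ "my-consumer", "rx-queue0", QDMA_FILTER_PARAM(&qdma_c2h) },
};

static struct qdma_platdata my_qdma_pdata = {
	.max_mm_channels = 1,
	.irq_index	 = 0,
	.device_map	 = my_qdma_map,
};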
diff --git a/include/linux/platform_data/cyttsp4.h b/include/linux/platform_data/cyttsp4.h
deleted file mode 100644
index 5dc9d2be384b..000000000000
--- a/include/linux/platform_data/cyttsp4.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Header file for:
- * Cypress TrueTouch(TM) Standard Product (TTSP) touchscreen drivers.
- * For use with Cypress Txx3xx parts.
- * Supported parts include:
- * CY8CTST341
- * CY8CTMA340
- *
- * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc.
- * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org>
- *
- * Contact Cypress Semiconductor at www.cypress.com (kev@cypress.com)
- */
-#ifndef _CYTTSP4_H_
-#define _CYTTSP4_H_
-
-#define CYTTSP4_MT_NAME "cyttsp4_mt"
-#define CYTTSP4_I2C_NAME "cyttsp4_i2c_adapter"
-#define CYTTSP4_SPI_NAME "cyttsp4_spi_adapter"
-
-#define CY_TOUCH_SETTINGS_MAX 32
-
-struct touch_framework {
- const uint16_t *abs;
- uint8_t size;
- uint8_t enable_vkeys;
-} __packed;
-
-struct cyttsp4_mt_platform_data {
- struct touch_framework *frmwrk;
- unsigned short flags;
- char const *inp_dev_name;
-};
-
-struct touch_settings {
- const uint8_t *data;
- uint32_t size;
- uint8_t tag;
-} __packed;
-
-struct cyttsp4_core_platform_data {
- int irq_gpio;
- int rst_gpio;
- int level_irq_udelay;
- int (*xres)(struct cyttsp4_core_platform_data *pdata,
- struct device *dev);
- int (*init)(struct cyttsp4_core_platform_data *pdata,
- int on, struct device *dev);
- int (*power)(struct cyttsp4_core_platform_data *pdata,
- int on, struct device *dev, atomic_t *ignore_irq);
- int (*irq_stat)(struct cyttsp4_core_platform_data *pdata,
- struct device *dev);
- struct touch_settings *sett[CY_TOUCH_SETTINGS_MAX];
-};
-
-struct cyttsp4_platform_data {
- struct cyttsp4_core_platform_data *core_pdata;
- struct cyttsp4_mt_platform_data *mt_pdata;
-};
-
-#endif /* _CYTTSP4_H_ */
diff --git a/include/linux/platform_data/dma-ep93xx.h b/include/linux/platform_data/dma-ep93xx.h
deleted file mode 100644
index eb9805bb3fe8..000000000000
--- a/include/linux/platform_data/dma-ep93xx.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_ARCH_DMA_H
-#define __ASM_ARCH_DMA_H
-
-#include <linux/types.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-
-/*
- * M2P channels.
- *
- * Note that these values are also directly used for setting the PPALLOC
- * register.
- */
-#define EP93XX_DMA_I2S1 0
-#define EP93XX_DMA_I2S2 1
-#define EP93XX_DMA_AAC1 2
-#define EP93XX_DMA_AAC2 3
-#define EP93XX_DMA_AAC3 4
-#define EP93XX_DMA_I2S3 5
-#define EP93XX_DMA_UART1 6
-#define EP93XX_DMA_UART2 7
-#define EP93XX_DMA_UART3 8
-#define EP93XX_DMA_IRDA 9
-/* M2M channels */
-#define EP93XX_DMA_SSP 10
-#define EP93XX_DMA_IDE 11
-
-/**
- * struct ep93xx_dma_data - configuration data for the EP93xx dmaengine
- * @port: peripheral which is requesting the channel
- * @direction: TX/RX channel
- * @name: optional name for the channel, this is displayed in /proc/interrupts
- *
- * This information is passed as private channel parameter in a filter
- * function. Note that this is only needed for slave/cyclic channels. For
- * memcpy channels %NULL data should be passed.
- */
-struct ep93xx_dma_data {
- int port;
- enum dma_transfer_direction direction;
- const char *name;
-};
-
-/**
- * struct ep93xx_dma_chan_data - platform specific data for a DMA channel
- * @name: name of the channel, used for getting the right clock for the channel
- * @base: mapped registers
- * @irq: interrupt number used by this channel
- */
-struct ep93xx_dma_chan_data {
- const char *name;
- void __iomem *base;
- int irq;
-};
-
-/**
- * struct ep93xx_dma_platform_data - platform data for the dmaengine driver
- * @channels: array of channels which are passed to the driver
- * @num_channels: number of channels in the array
- *
- * This structure is passed to the DMA engine driver via platform data. For
- * M2P channels, contract is that even channels are for TX and odd for RX.
- * There is no requirement for the M2M channels.
- */
-struct ep93xx_dma_platform_data {
- struct ep93xx_dma_chan_data *channels;
- size_t num_channels;
-};
-
-static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan)
-{
- return !strcmp(dev_name(chan->device->dev), "ep93xx-dma-m2p");
-}
-
-/**
- * ep93xx_dma_chan_direction - returns direction the channel can be used
- * @chan: channel
- *
- * This function can be used in filter functions to find out whether the
- * channel supports given DMA direction. Only M2P channels have such
- * limitation, for M2M channels the direction is configurable.
- */
-static inline enum dma_transfer_direction
-ep93xx_dma_chan_direction(struct dma_chan *chan)
-{
- if (!ep93xx_dma_chan_is_m2p(chan))
- return DMA_TRANS_NONE;
-
- /* even channels are for TX, odd for RX */
- return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
-}
-
-#endif /* __ASM_ARCH_DMA_H */
diff --git a/include/linux/platform_data/eth-ep93xx.h b/include/linux/platform_data/eth-ep93xx.h
deleted file mode 100644
index 8eef637a804d..000000000000
--- a/include/linux/platform_data/eth-ep93xx.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_PLATFORM_DATA_ETH_EP93XX
-#define _LINUX_PLATFORM_DATA_ETH_EP93XX
-
-struct ep93xx_eth_data {
- unsigned char dev_addr[6];
- unsigned char phy_id;
-};
-
-#endif
diff --git a/include/linux/platform_data/gpio-ath79.h b/include/linux/platform_data/gpio-ath79.h
deleted file mode 100644
index 3ea6dd942c27..000000000000
--- a/include/linux/platform_data/gpio-ath79.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Atheros AR7XXX/AR9XXX GPIO controller platform data
- *
- * Copyright (C) 2015 Alban Bedel <albeu@free.fr>
- */
-
-#ifndef __LINUX_PLATFORM_DATA_GPIO_ATH79_H
-#define __LINUX_PLATFORM_DATA_GPIO_ATH79_H
-
-struct ath79_gpio_platform_data {
- unsigned ngpios;
- bool oe_inverted;
-};
-
-#endif
diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h
deleted file mode 100644
index b82e44662efe..000000000000
--- a/include/linux/platform_data/gpio-davinci.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * DaVinci GPIO Platform Related Defines
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/
- */
-
-#ifndef __DAVINCI_GPIO_PLATFORM_H
-#define __DAVINCI_GPIO_PLATFORM_H
-
-struct davinci_gpio_platform_data {
- bool no_auto_base;
- u32 base;
- u32 ngpio;
- u32 gpio_unbanked;
-};
-
-/* Convert GPIO signal to GPIO pin number */
-#define GPIO_TO_PIN(bank, gpio) (16 * (bank) + (gpio))
-
-#endif
diff --git a/include/linux/platform_data/keypad-ep93xx.h b/include/linux/platform_data/keypad-ep93xx.h
deleted file mode 100644
index 3054fced8509..000000000000
--- a/include/linux/platform_data/keypad-ep93xx.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __KEYPAD_EP93XX_H
-#define __KEYPAD_EP93XX_H
-
-struct matrix_keymap_data;
-
-/* flags for the ep93xx_keypad driver */
-#define EP93XX_KEYPAD_DISABLE_3_KEY (1<<0) /* disable 3-key reset */
-#define EP93XX_KEYPAD_DIAG_MODE (1<<1) /* diagnostic mode */
-#define EP93XX_KEYPAD_BACK_DRIVE (1<<2) /* back driving mode */
-#define EP93XX_KEYPAD_TEST_MODE (1<<3) /* scan only column 0 */
-#define EP93XX_KEYPAD_AUTOREPEAT (1<<4) /* enable key autorepeat */
-
-/**
- * struct ep93xx_keypad_platform_data - platform specific device structure
- * @keymap_data: pointer to &matrix_keymap_data
- * @debounce: debounce start count; terminal count is 0xff
- * @prescale: row/column counter pre-scaler load value
- * @flags: see above
- */
-struct ep93xx_keypad_platform_data {
- struct matrix_keymap_data *keymap_data;
- unsigned int debounce;
- unsigned int prescale;
- unsigned int flags;
- unsigned int clk_rate;
-};
-
-#define EP93XX_MATRIX_ROWS (8)
-#define EP93XX_MATRIX_COLS (8)
-
-#endif /* __KEYPAD_EP93XX_H */
diff --git a/include/linux/platform_data/keypad-nomadik-ske.h b/include/linux/platform_data/keypad-nomadik-ske.h
deleted file mode 100644
index 7efabbca1dca..000000000000
--- a/include/linux/platform_data/keypad-nomadik-ske.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * Author: Naveen Kumar Gaddipati <naveen.gaddipati@stericsson.com>
- *
- * ux500 Scroll key and Keypad Encoder (SKE) header
- */
-
-#ifndef __SKE_H
-#define __SKE_H
-
-#include <linux/input/matrix_keypad.h>
-
-/* register definitions for SKE peripheral */
-#define SKE_CR 0x00
-#define SKE_VAL0 0x04
-#define SKE_VAL1 0x08
-#define SKE_DBCR 0x0C
-#define SKE_IMSC 0x10
-#define SKE_RIS 0x14
-#define SKE_MIS 0x18
-#define SKE_ICR 0x1C
-
-/*
- * Keypad module
- */
-
-/**
- * struct keypad_platform_data - structure for platform specific data
- * @init: pointer to keypad init function
- * @exit: pointer to keypad deinitialisation function
- * @keymap_data: matrix scan code table for keycodes
- * @krow: maximum number of rows
- * @kcol: maximum number of columns
- * @debounce_ms: platform specific debounce time
- * @no_autorepeat: flag for auto repetition
- * @wakeup_enable: allow waking up the system
- */
-struct ske_keypad_platform_data {
- int (*init)(void);
- int (*exit)(void);
- const struct matrix_keymap_data *keymap_data;
- u8 krow;
- u8 kcol;
- u8 debounce_ms;
- bool no_autorepeat;
- bool wakeup_enable;
-};
-#endif /*__SKE_KPD_H*/
diff --git a/include/linux/platform_data/max6697.h b/include/linux/platform_data/max6697.h
deleted file mode 100644
index 6fbb70005541..000000000000
--- a/include/linux/platform_data/max6697.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * max6697.h
- * Copyright (c) 2012 Guenter Roeck <linux@roeck-us.net>
- */
-
-#ifndef MAX6697_H
-#define MAX6697_H
-
-#include <linux/types.h>
-
-/*
- * For all bit masks:
- * bit 0: local temperature
- * bit 1..7: remote temperatures
- */
-struct max6697_platform_data {
- bool smbus_timeout_disable; /* set to disable SMBus timeouts */
- bool extended_range_enable; /* set to enable extended temp range */
- bool beta_compensation; /* set to enable beta compensation */
- u8 alert_mask; /* set bit to 1 to disable alert */
- u8 over_temperature_mask; /* set bit to 1 to disable */
- u8 resistance_cancellation; /* set bit to 0 to disable
- * bit mask for MAX6581,
- * boolean for other chips
- */
- u8 ideality_mask; /* set bit to 0 to disable */
- u8 ideality_value; /* transistor ideality as per
- * MAX6581 datasheet
- */
-};
-
-#endif /* MAX6697_H */
diff --git a/include/linux/platform_data/mcs.h b/include/linux/platform_data/mcs.h
deleted file mode 100644
index fcc6f2a1f5c3..000000000000
--- a/include/linux/platform_data/mcs.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2009 - 2010 Samsung Electronics Co.Ltd
- * Author: Joonyoung Shim <jy0922.shim@samsung.com>
- * Author: HeungJun Kim <riverful.kim@samsung.com>
- */
-
-#ifndef __LINUX_MCS_H
-#define __LINUX_MCS_H
-
-#define MCS_KEY_MAP(v, c) ((((v) & 0xff) << 16) | ((c) & 0xffff))
-#define MCS_KEY_VAL(v) (((v) >> 16) & 0xff)
-#define MCS_KEY_CODE(v) ((v) & 0xffff)
-
-struct mcs_platform_data {
- void (*poweron)(bool);
- void (*cfg_pin)(void);
-
- /* touchscreen */
- unsigned int x_size;
- unsigned int y_size;
-
- /* touchkey */
- const u32 *keymap;
- unsigned int keymap_size;
- unsigned int key_maxval;
- bool no_autorepeat;
-};
-
-#endif /* __LINUX_MCS_H */
diff --git a/include/linux/platform_data/mtd-davinci-aemif.h b/include/linux/platform_data/mtd-davinci-aemif.h
deleted file mode 100644
index a49826214a39..000000000000
--- a/include/linux/platform_data/mtd-davinci-aemif.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * TI DaVinci AEMIF support
- *
- * Copyright 2010 (C) Texas Instruments, Inc. https://www.ti.com/
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-#ifndef _MACH_DAVINCI_AEMIF_H
-#define _MACH_DAVINCI_AEMIF_H
-
-#include <linux/platform_device.h>
-
-#define NRCSR_OFFSET 0x00
-#define AWCCR_OFFSET 0x04
-#define A1CR_OFFSET 0x10
-
-#define ACR_ASIZE_MASK 0x3
-#define ACR_EW_MASK BIT(30)
-#define ACR_SS_MASK BIT(31)
-
-/* All timings in nanoseconds */
-struct davinci_aemif_timing {
- u8 wsetup;
- u8 wstrobe;
- u8 whold;
-
- u8 rsetup;
- u8 rstrobe;
- u8 rhold;
-
- u8 ta;
-};
-
-#endif
diff --git a/include/linux/platform_data/mtd-davinci.h b/include/linux/platform_data/mtd-davinci.h
deleted file mode 100644
index dd474dd44848..000000000000
--- a/include/linux/platform_data/mtd-davinci.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * mach-davinci/nand.h
- *
- * Copyright © 2006 Texas Instruments.
- *
- * Ported to 2.6.23 Copyright © 2008 by
- * Sander Huijsen <Shuijsen@optelecom-nkf.com>
- * Troy Kisky <troy.kisky@boundarydevices.com>
- * Dirk Behme <Dirk.Behme@gmail.com>
- *
- * --------------------------------------------------------------------------
- */
-
-#ifndef __ARCH_ARM_DAVINCI_NAND_H
-#define __ARCH_ARM_DAVINCI_NAND_H
-
-#include <linux/mtd/rawnand.h>
-
-#define NANDFCR_OFFSET 0x60
-#define NANDFSR_OFFSET 0x64
-#define NANDF1ECC_OFFSET 0x70
-
-/* 4-bit ECC syndrome registers */
-#define NAND_4BIT_ECC_LOAD_OFFSET 0xbc
-#define NAND_4BIT_ECC1_OFFSET 0xc0
-#define NAND_4BIT_ECC2_OFFSET 0xc4
-#define NAND_4BIT_ECC3_OFFSET 0xc8
-#define NAND_4BIT_ECC4_OFFSET 0xcc
-#define NAND_ERR_ADD1_OFFSET 0xd0
-#define NAND_ERR_ADD2_OFFSET 0xd4
-#define NAND_ERR_ERRVAL1_OFFSET 0xd8
-#define NAND_ERR_ERRVAL2_OFFSET 0xdc
-
-/* NOTE: boards don't need to use these address bits
- * for ALE/CLE unless they support booting from NAND.
- * They're used unless platform data overrides them.
- */
-#define MASK_ALE 0x08
-#define MASK_CLE 0x10
-
-struct davinci_nand_pdata { /* platform_data */
- uint32_t mask_ale;
- uint32_t mask_cle;
-
- /*
- * 0-indexed chip-select number of the asynchronous
- * interface to which the NAND device has been connected.
- *
- * So, if you have NAND connected to CS3 of DA850, you
- * will pass '1' here. Since the asynchronous interface
- * on DA850 starts from CS2.
- */
- uint32_t core_chipsel;
-
- /* for packages using two chipselects */
- uint32_t mask_chipsel;
-
- /* board's default static partition info */
- struct mtd_partition *parts;
- unsigned nr_parts;
-
- /* none == NAND_ECC_ENGINE_TYPE_NONE (strongly *not* advised!!)
- * soft == NAND_ECC_ENGINE_TYPE_SOFT
- * else == NAND_ECC_ENGINE_TYPE_ON_HOST, according to ecc_bits
- *
- * All DaVinci-family chips support 1-bit hardware ECC.
- * Newer ones also support 4-bit ECC, but are awkward
- * using it with large page chips.
- */
- enum nand_ecc_engine_type engine_type;
- enum nand_ecc_placement ecc_placement;
- u8 ecc_bits;
-
- /* e.g. NAND_BUSWIDTH_16 */
- unsigned options;
- /* e.g. NAND_BBT_USE_FLASH */
- unsigned bbt_options;
-
- /* Main and mirror bbt descriptor overrides */
- struct nand_bbt_descr *bbt_td;
- struct nand_bbt_descr *bbt_md;
-
- /* Access timings */
- struct davinci_aemif_timing *timing;
-};
-
-#endif /* __ARCH_ARM_DAVINCI_NAND_H */
diff --git a/include/linux/platform_data/spi-ep93xx.h b/include/linux/platform_data/spi-ep93xx.h
deleted file mode 100644
index b439f2a896e0..000000000000
--- a/include/linux/platform_data/spi-ep93xx.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_MACH_EP93XX_SPI_H
-#define __ASM_MACH_EP93XX_SPI_H
-
-struct spi_device;
-
-/**
- * struct ep93xx_spi_info - EP93xx specific SPI descriptor
- * @use_dma: use DMA for the transfers
- */
-struct ep93xx_spi_info {
- bool use_dma;
-};
-
-#endif /* __ASM_MACH_EP93XX_SPI_H */
diff --git a/include/linux/platform_data/ti-aemif.h b/include/linux/platform_data/ti-aemif.h
deleted file mode 100644
index 77625251df07..000000000000
--- a/include/linux/platform_data/ti-aemif.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * TI DaVinci AEMIF platform glue.
- *
- * Copyright (C) 2017 BayLibre SAS
- *
- * Author:
- * Bartosz Golaszewski <bgolaszewski@baylibre.com>
- */
-
-#ifndef __TI_DAVINCI_AEMIF_DATA_H__
-#define __TI_DAVINCI_AEMIF_DATA_H__
-
-#include <linux/of_platform.h>
-
-/**
- * struct aemif_abus_data - Async bus configuration parameters.
- *
- * @cs - Chip-select number.
- */
-struct aemif_abus_data {
- u32 cs;
-};
-
-/**
- * struct aemif_platform_data - Data to set up the TI aemif driver.
- *
- * @dev_lookup: of_dev_auxdata passed to of_platform_populate() for aemif
- * subdevices.
- * @cs_offset: Lowest allowed chip-select number.
- * @abus_data: Array of async bus configuration entries.
- * @num_abus_data: Number of abus entries.
- * @sub_devices: Array of platform subdevices.
- * @num_sub_devices: Number of subdevices.
- */
-struct aemif_platform_data {
- struct of_dev_auxdata *dev_lookup;
- u32 cs_offset;
- struct aemif_abus_data *abus_data;
- size_t num_abus_data;
- struct platform_device *sub_devices;
- size_t num_sub_devices;
-};
-
-#endif /* __TI_DAVINCI_AEMIF_DATA_H__ */
diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
index ae9bf7479e7b..365e119bebaa 100644
--- a/include/linux/platform_data/x86/asus-wmi.h
+++ b/include/linux/platform_data/x86/asus-wmi.h
@@ -4,6 +4,7 @@
#include <linux/errno.h>
#include <linux/types.h>
+#include <linux/dmi.h>
/* WMI Methods */
#define ASUS_WMI_METHODID_SPEC 0x43455053 /* BIOS SPECification */
@@ -69,6 +70,7 @@
#define ASUS_WMI_DEVID_SCREENPAD_LIGHT 0x00050032
#define ASUS_WMI_DEVID_FAN_BOOST_MODE 0x00110018
#define ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY 0x00120075
+#define ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY_VIVO 0x00110019
/* Misc */
#define ASUS_WMI_DEVID_PANEL_OD 0x00050019
@@ -165,4 +167,39 @@ static inline int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1,
}
#endif
+/* To be used by both hid-asus and asus-wmi to determine which controls kbd_brightness */
+static const struct dmi_system_id asus_use_hid_led_dmi_ids[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ROG Zephyrus"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ROG Strix"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ROG Flow"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GA403U"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GU605M"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "RC71L"),
+ },
+ },
+ { },
+};
+
#endif /* __PLATFORM_DATA_X86_ASUS_WMI_H */
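Editorial sketch (not part of the patch): both hid-asus and asus-wmi can include this header and consult the shared table to decide which driver registers the keyboard-backlight LED; the helper name is hypothetical.

static bool kbd_led_is_handled_by_hid(void)
{
	return dmi_check_system(asus_use_hid_led_dmi_ids);
}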
diff --git a/include/linux/platform_data/intel-mid_wdt.h b/include/linux/platform_data/x86/intel-mid_wdt.h
index 8dba70b4b020..e5c0210d0fec 100644
--- a/include/linux/platform_data/intel-mid_wdt.h
+++ b/include/linux/platform_data/x86/intel-mid_wdt.h
@@ -6,8 +6,8 @@
* Contact: David Cohen <david.a.cohen@linux.intel.com>
*/
-#ifndef __INTEL_MID_WDT_H__
-#define __INTEL_MID_WDT_H__
+#ifndef __PLATFORM_X86_INTEL_MID_WDT_H_
+#define __PLATFORM_X86_INTEL_MID_WDT_H_
#include <linux/platform_device.h>
@@ -16,4 +16,4 @@ struct intel_mid_wdt_pdata {
int (*probe)(struct platform_device *pdev);
};
-#endif /*__INTEL_MID_WDT_H__*/
+#endif /* __PLATFORM_X86_INTEL_MID_WDT_H_ */
diff --git a/include/linux/platform_data/x86/intel_scu_ipc.h b/include/linux/platform_data/x86/intel_scu_ipc.h
new file mode 100644
index 000000000000..0ca9962e97f2
--- /dev/null
+++ b/include/linux/platform_data/x86/intel_scu_ipc.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PLATFORM_X86_INTEL_SCU_IPC_H_
+#define __PLATFORM_X86_INTEL_SCU_IPC_H_
+
+#include <linux/ioport.h>
+
+struct device;
+struct intel_scu_ipc_dev;
+
+/**
+ * struct intel_scu_ipc_data - Data used to configure SCU IPC
+ * @mem: Base address of SCU IPC MMIO registers
+ * @irq: The IRQ number used for SCU (optional)
+ */
+struct intel_scu_ipc_data {
+ struct resource mem;
+ int irq;
+};
+
+struct intel_scu_ipc_dev *
+__intel_scu_ipc_register(struct device *parent,
+ const struct intel_scu_ipc_data *scu_data,
+ struct module *owner);
+
+#define intel_scu_ipc_register(parent, scu_data) \
+ __intel_scu_ipc_register(parent, scu_data, THIS_MODULE)
+
+void intel_scu_ipc_unregister(struct intel_scu_ipc_dev *scu);
+
+struct intel_scu_ipc_dev *
+__devm_intel_scu_ipc_register(struct device *parent,
+ const struct intel_scu_ipc_data *scu_data,
+ struct module *owner);
+
+#define devm_intel_scu_ipc_register(parent, scu_data) \
+ __devm_intel_scu_ipc_register(parent, scu_data, THIS_MODULE)
+
+struct intel_scu_ipc_dev *intel_scu_ipc_dev_get(void);
+void intel_scu_ipc_dev_put(struct intel_scu_ipc_dev *scu);
+struct intel_scu_ipc_dev *devm_intel_scu_ipc_dev_get(struct device *dev);
+
+int intel_scu_ipc_dev_ioread8(struct intel_scu_ipc_dev *scu, u16 addr,
+ u8 *data);
+int intel_scu_ipc_dev_iowrite8(struct intel_scu_ipc_dev *scu, u16 addr,
+ u8 data);
+int intel_scu_ipc_dev_readv(struct intel_scu_ipc_dev *scu, u16 *addr,
+ u8 *data, size_t len);
+int intel_scu_ipc_dev_writev(struct intel_scu_ipc_dev *scu, u16 *addr,
+ u8 *data, size_t len);
+
+int intel_scu_ipc_dev_update(struct intel_scu_ipc_dev *scu, u16 addr,
+ u8 data, u8 mask);
+
+int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd,
+ int sub);
+int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd,
+ int sub, const void *in, size_t inlen,
+ size_t size, void *out, size_t outlen);
+
+static inline int intel_scu_ipc_dev_command(struct intel_scu_ipc_dev *scu, int cmd,
+ int sub, const void *in, size_t inlen,
+ void *out, size_t outlen)
+{
+ return intel_scu_ipc_dev_command_with_size(scu, cmd, sub, in, inlen,
+ inlen, out, outlen);
+}
+
+#endif
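Editorial sketch (not part of the patch): a parent platform driver registering an SCU IPC instance and a consumer reading one register. The MMIO base and register address are placeholders, and the register functions are assumed to return an ERR_PTR on failure.

static int scu_parent_probe(struct platform_device *pdev)
{
	struct intel_scu_ipc_data scu_data = {
		.mem = DEFINE_RES_MEM(0xff009000, 0x100),	/* placeholder */
		.irq = 0,					/* optional */
	};
	struct intel_scu_ipc_dev *scu;
	u8 val;
	int ret;

	scu = devm_intel_scu_ipc_register(&pdev->dev, &scu_data);
	if (IS_ERR(scu))
		return PTR_ERR(scu);

	/* Example consumer access: read one byte from a PMIC register. */
	ret = intel_scu_ipc_dev_ioread8(scu, 0x20, &val);	/* placeholder addr */
	return ret;
}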
diff --git a/include/linux/platform_data/zforce_ts.h b/include/linux/platform_data/zforce_ts.h
deleted file mode 100644
index 2463a4a856a6..000000000000
--- a/include/linux/platform_data/zforce_ts.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* drivers/input/touchscreen/zforce.c
- *
- * Copyright (C) 2012-2013 MundoReader S.L.
- */
-
-#ifndef _LINUX_INPUT_ZFORCE_TS_H
-#define _LINUX_INPUT_ZFORCE_TS_H
-
-struct zforce_ts_platdata {
- unsigned int x_max;
- unsigned int y_max;
-};
-
-#endif /* _LINUX_INPUT_ZFORCE_TS_H */
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index d422db6eec63..7132623e4658 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -52,7 +52,7 @@ struct platform_device {
extern int platform_device_register(struct platform_device *);
extern void platform_device_unregister(struct platform_device *);
-extern struct bus_type platform_bus_type;
+extern const struct bus_type platform_bus_type;
extern struct device platform_bus;
extern struct resource *platform_get_resource(struct platform_device *,
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 858c8e7851fb..b637ec14025f 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -198,8 +198,11 @@ struct generic_pm_domain {
spinlock_t slock;
unsigned long lock_flags;
};
+ struct {
+ raw_spinlock_t raw_slock;
+ unsigned long raw_lock_flags;
+ };
};
-
};
static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
@@ -473,6 +476,9 @@ struct device *dev_pm_domain_attach_by_name(struct device *dev,
int dev_pm_domain_attach_list(struct device *dev,
const struct dev_pm_domain_attach_data *data,
struct dev_pm_domain_list **list);
+int devm_pm_domain_attach_list(struct device *dev,
+ const struct dev_pm_domain_attach_data *data,
+ struct dev_pm_domain_list **list);
void dev_pm_domain_detach(struct device *dev, bool power_off);
void dev_pm_domain_detach_list(struct dev_pm_domain_list *list);
int dev_pm_domain_start(struct device *dev);
@@ -499,6 +505,14 @@ static inline int dev_pm_domain_attach_list(struct device *dev,
{
return 0;
}
+
+static inline int devm_pm_domain_attach_list(struct device *dev,
+ const struct dev_pm_domain_attach_data *data,
+ struct dev_pm_domain_list **list)
+{
+ return 0;
+}
+
static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {}
static inline void dev_pm_domain_detach_list(struct dev_pm_domain_list *list) {}
static inline int dev_pm_domain_start(struct device *dev)
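Editorial sketch (not part of the patch): the devres variant removes the explicit dev_pm_domain_detach_list() call from error and remove paths. Domain names and the driver are hypothetical.

static int mydrv_probe(struct platform_device *pdev)
{
	static const char * const pd_names[] = { "perf", "mem" };
	struct dev_pm_domain_attach_data pd_data = {
		.pd_names = pd_names,
		.num_pd_names = ARRAY_SIZE(pd_names),
	};
	struct dev_pm_domain_list *pd_list;
	int ret;

	ret = devm_pm_domain_attach_list(&pdev->dev, &pd_data, &pd_list);
	if (ret < 0)
		return ret;

	/* pd_list is detached automatically when the driver unbinds. */
	return 0;
}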
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index dc7b738de299..453691710839 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -158,7 +158,7 @@ static inline void posix_cputimers_init_work(void) { }
* @rcu: RCU head for freeing the timer.
*/
struct k_itimer {
- struct list_head list;
+ struct hlist_node list;
struct hlist_node t_hash;
spinlock_t it_lock;
const struct k_clock *kclock;
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 72dc7e45c90c..910d407ebe63 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -243,8 +243,7 @@ struct power_supply_desc {
const char *name;
enum power_supply_type type;
u8 charge_behaviours;
- const enum power_supply_usb_type *usb_types;
- size_t num_usb_types;
+ u32 usb_types;
const enum power_supply_property *properties;
size_t num_properties;
diff --git a/include/linux/printk.h b/include/linux/printk.h
index b937cefcb31c..eca9bb2ee637 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -9,6 +9,8 @@
#include <linux/ratelimit_types.h>
#include <linux/once_lite.h>
+struct console;
+
extern const char linux_banner[];
extern const char linux_proc_banner[];
@@ -161,15 +163,16 @@ int _printk(const char *fmt, ...);
*/
__printf(1, 2) __cold int _printk_deferred(const char *fmt, ...);
-extern void __printk_safe_enter(void);
-extern void __printk_safe_exit(void);
+extern void __printk_deferred_enter(void);
+extern void __printk_deferred_exit(void);
+
/*
* The printk_deferred_enter/exit macros are available only as a hack for
* some code paths that need to defer all printk console printing. Interrupts
* must be disabled for the deferred duration.
*/
-#define printk_deferred_enter __printk_safe_enter
-#define printk_deferred_exit __printk_safe_exit
+#define printk_deferred_enter() __printk_deferred_enter()
+#define printk_deferred_exit() __printk_deferred_exit()
/*
* Please don't use printk_ratelimit(), because it shares ratelimiting state
@@ -197,6 +200,10 @@ extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold;
extern asmlinkage void dump_stack(void) __cold;
void printk_trigger_flush(void);
void console_try_replay_all(void);
+void printk_legacy_allow_panic_sync(void);
+extern bool nbcon_device_try_acquire(struct console *con);
+extern void nbcon_device_release(struct console *con);
+void nbcon_atomic_flush_unsafe(void);
#else
static inline __printf(1, 0)
int vprintk(const char *s, va_list args)
@@ -279,6 +286,24 @@ static inline void printk_trigger_flush(void)
static inline void console_try_replay_all(void)
{
}
+
+static inline void printk_legacy_allow_panic_sync(void)
+{
+}
+
+static inline bool nbcon_device_try_acquire(struct console *con)
+{
+ return false;
+}
+
+static inline void nbcon_device_release(struct console *con)
+{
+}
+
+static inline void nbcon_atomic_flush_unsafe(void)
+{
+}
+
#endif
bool this_cpu_in_panic(void);
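Editorial sketch (not part of the patch): a console driver that needs to touch console hardware outside its write() callback is expected to bracket that access with the device-side acquire/release helpers so it cannot race with an atomic printer. This sketch simply skips the access when the console cannot be acquired; a real driver may retry or use higher-level locking wrappers.

static void my_console_poke_hw(struct console *con)
{
	if (!nbcon_device_try_acquire(con))
		return;		/* a printer currently owns the hardware */

	/* ... safely reprogram the console hardware ... */

	nbcon_device_release(con);
}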
diff --git a/include/linux/prmt.h b/include/linux/prmt.h
index 24da8364b919..9c094294403f 100644
--- a/include/linux/prmt.h
+++ b/include/linux/prmt.h
@@ -2,6 +2,11 @@
#ifdef CONFIG_ACPI_PRMT
void init_prmt(void);
+int acpi_call_prm_handler(guid_t handler_guid, void *param_buffer);
#else
static inline void init_prmt(void) { }
+static inline int acpi_call_prm_handler(guid_t handler_guid, void *param_buffer)
+{
+ return -EOPNOTSUPP;
+}
#endif
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index 638507a3c8ff..fed601053c51 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -182,7 +182,7 @@ struct pstore_info {
struct module *owner;
const char *name;
- spinlock_t buf_lock;
+ raw_spinlock_t buf_lock;
char *buf;
size_t bufsize;
diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h
index 1b5a953c6bbc..3a74f69e0b59 100644
--- a/include/linux/ptp_classify.h
+++ b/include/linux/ptp_classify.h
@@ -10,7 +10,7 @@
#ifndef _PTP_CLASSIFY_H_
#define _PTP_CLASSIFY_H_
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/ip.h>
#include <linux/ktime.h>
#include <linux/skbuff.h>
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index f8c2dc12dbd3..8acd60b53f58 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -394,9 +394,6 @@ static inline bool pwm_might_sleep(struct pwm_device *pwm)
}
/* PWM provider APIs */
-int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result,
- unsigned long timeout);
-
void pwmchip_put(struct pwm_chip *chip);
struct pwm_chip *pwmchip_alloc(struct device *parent, unsigned int npwm, size_t sizeof_priv);
struct pwm_chip *devm_pwmchip_alloc(struct device *parent, unsigned int npwm, size_t sizeof_priv);
@@ -462,13 +459,6 @@ static inline void pwm_disable(struct pwm_device *pwm)
might_sleep();
}
-static inline int pwm_capture(struct pwm_device *pwm,
- struct pwm_capture *result,
- unsigned long timeout)
-{
- return -EINVAL;
-}
-
static inline void pwmchip_put(struct pwm_chip *chip)
{
}
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 07071e64abf3..89a0d83ddad0 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -526,7 +526,7 @@ struct quota_info {
const struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */
};
-int register_quota_format(struct quota_format_type *fmt);
+void register_quota_format(struct quota_format_type *fmt);
void unregister_quota_format(struct quota_format_type *fmt);
struct quota_module_name {
diff --git a/include/linux/ratelimit_types.h b/include/linux/ratelimit_types.h
index 002266693e50..765232ce0b5e 100644
--- a/include/linux/ratelimit_types.h
+++ b/include/linux/ratelimit_types.h
@@ -19,8 +19,8 @@ struct ratelimit_state {
int burst;
int printed;
int missed;
+ unsigned int flags;
unsigned long begin;
- unsigned long flags;
};
#define RATELIMIT_STATE_INIT_FLAGS(name, interval_init, burst_init, flags_init) { \
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index f7edca369eda..7c173aa64e1e 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -245,6 +245,42 @@ rb_find_add(struct rb_node *node, struct rb_root *tree,
}
/**
+ * rb_find_add_rcu() - find equivalent @node in @tree, or add @node
+ * @node: node to look-for / insert
+ * @tree: tree to search / modify
+ * @cmp: operator defining the node order
+ *
+ * Adds a Store-Release for link_node.
+ *
+ * Returns the rb_node matching @node, or NULL when no match is found and @node
+ * is inserted.
+ */
+static __always_inline struct rb_node *
+rb_find_add_rcu(struct rb_node *node, struct rb_root *tree,
+ int (*cmp)(struct rb_node *, const struct rb_node *))
+{
+ struct rb_node **link = &tree->rb_node;
+ struct rb_node *parent = NULL;
+ int c;
+
+ while (*link) {
+ parent = *link;
+ c = cmp(node, parent);
+
+ if (c < 0)
+ link = &parent->rb_left;
+ else if (c > 0)
+ link = &parent->rb_right;
+ else
+ return parent;
+ }
+
+ rb_link_node_rcu(node, parent, link);
+ rb_insert_color(node, tree);
+ return NULL;
+}
+
+/**
* rb_find() - find @key in tree @tree
* @key: key to match
* @tree: tree to search
@@ -273,6 +309,37 @@ rb_find(const void *key, const struct rb_root *tree,
}
/**
+ * rb_find_rcu() - find @key in tree @tree
+ * @key: key to match
+ * @tree: tree to search
+ * @cmp: operator defining the node order
+ *
+ * Notably, tree descent vs concurrent tree rotations is unsound and can result
+ * in false-negatives.
+ *
+ * Returns the rb_node matching @key or NULL.
+ */
+static __always_inline struct rb_node *
+rb_find_rcu(const void *key, const struct rb_root *tree,
+ int (*cmp)(const void *key, const struct rb_node *))
+{
+ struct rb_node *node = tree->rb_node;
+
+ while (node) {
+ int c = cmp(key, node);
+
+ if (c < 0)
+ node = rcu_dereference_raw(node->rb_left);
+ else if (c > 0)
+ node = rcu_dereference_raw(node->rb_right);
+ else
+ return node;
+ }
+
+ return NULL;
+}
+
+/**
* rb_find_first() - find the first @key in @tree
* @key: key to match
* @tree: tree to search
diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h
index ba95c06675e1..2fdc2208f1ca 100644
--- a/include/linux/rcu_segcblist.h
+++ b/include/linux/rcu_segcblist.h
@@ -185,11 +185,7 @@ struct rcu_cblist {
* ----------------------------------------------------------------------------
*/
#define SEGCBLIST_ENABLED BIT(0)
-#define SEGCBLIST_RCU_CORE BIT(1)
-#define SEGCBLIST_LOCKING BIT(2)
-#define SEGCBLIST_KTHREAD_CB BIT(3)
-#define SEGCBLIST_KTHREAD_GP BIT(4)
-#define SEGCBLIST_OFFLOADED BIT(5)
+#define SEGCBLIST_OFFLOADED BIT(1)
struct rcu_segcblist {
struct rcu_head *head;
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 3dc1e58865f7..14dfa6008467 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -191,7 +191,10 @@ static inline void hlist_del_init_rcu(struct hlist_node *n)
* @old : the element to be replaced
* @new : the new element to insert
*
- * The @old entry will be replaced with the @new entry atomically.
+ * The @old entry will be replaced with the @new entry atomically from
+ * the perspective of concurrent readers. It is the caller's responsibility
+ * to synchronize with concurrent updaters, if any.
+ *
* Note: @old should not be empty.
*/
static inline void list_replace_rcu(struct list_head *old,
@@ -519,7 +522,9 @@ static inline void hlist_del_rcu(struct hlist_node *n)
* @old : the element to be replaced
* @new : the new element to insert
*
- * The @old entry will be replaced with the @new entry atomically.
+ * The @old entry will be replaced with the @new entry atomically from
+ * the perspective of concurrent readers. It is the caller's responsibility
+ * to synchronize with concurrent updaters, if any.
*/
static inline void hlist_replace_rcu(struct hlist_node *old,
struct hlist_node *new)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 13f6f00aecf9..58d84c59f3dd 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -34,10 +34,12 @@
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
+#define RCU_SEQ_CTR_SHIFT 2
+#define RCU_SEQ_STATE_MASK ((1 << RCU_SEQ_CTR_SHIFT) - 1)
+
/* Exported common interfaces */
void call_rcu(struct rcu_head *head, rcu_callback_t func);
void rcu_barrier_tasks(void);
-void rcu_barrier_tasks_rude(void);
void synchronize_rcu(void);
struct rcu_gp_oldstate;
@@ -144,11 +146,18 @@ void rcu_init_nohz(void);
int rcu_nocb_cpu_offload(int cpu);
int rcu_nocb_cpu_deoffload(int cpu);
void rcu_nocb_flush_deferred_wakeup(void);
+
+#define RCU_NOCB_LOCKDEP_WARN(c, s) RCU_LOCKDEP_WARN(c, s)
+
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
+
static inline void rcu_init_nohz(void) { }
static inline int rcu_nocb_cpu_offload(int cpu) { return -EINVAL; }
static inline int rcu_nocb_cpu_deoffload(int cpu) { return 0; }
static inline void rcu_nocb_flush_deferred_wakeup(void) { }
+
+#define RCU_NOCB_LOCKDEP_WARN(c, s)
+
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
/*
@@ -165,6 +174,7 @@ static inline void rcu_nocb_flush_deferred_wakeup(void) { }
} while (0)
void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks(void);
+void rcu_tasks_torture_stats_print(char *tt, char *tf);
# else
# define rcu_tasks_classic_qs(t, preempt) do { } while (0)
# define call_rcu_tasks call_rcu
@@ -191,6 +201,7 @@ void rcu_tasks_trace_qs_blkd(struct task_struct *t);
rcu_tasks_trace_qs_blkd(t); \
} \
} while (0)
+void rcu_tasks_trace_torture_stats_print(char *tt, char *tf);
# else
# define rcu_tasks_trace_qs(t) do { } while (0)
# endif
@@ -202,8 +213,8 @@ do { \
} while (0)
# ifdef CONFIG_TASKS_RUDE_RCU
-void call_rcu_tasks_rude(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks_rude(void);
+void rcu_tasks_rude_torture_stats_print(char *tt, char *tf);
# endif
#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false)
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index d9ac7b136aea..0ee270b3f5ed 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -111,6 +111,11 @@ static inline void __kvfree_call_rcu(struct rcu_head *head, void *ptr)
kvfree(ptr);
}
+static inline void kvfree_rcu_barrier(void)
+{
+ rcu_barrier();
+}
+
#ifdef CONFIG_KASAN_GENERIC
void kvfree_call_rcu(struct rcu_head *head, void *ptr);
#else
@@ -158,7 +163,7 @@ void rcu_scheduler_starting(void);
static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
static inline bool rcu_is_watching(void) { return true; }
-static inline void rcu_momentary_dyntick_idle(void) { }
+static inline void rcu_momentary_eqs(void) { }
static inline void kfree_rcu_scheduler_running(void) { }
static inline bool rcu_gp_might_be_stalled(void) { return false; }
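kvfree_rcu_barrier() lets a caller wait until every previously queued kvfree_rcu() request has completed; on Tiny RCU it simply falls back to rcu_barrier() as shown above. A hedged usage sketch (the obj structure and teardown path are illustrative):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj {
        int data;
        struct rcu_head rcu;
};

static void obj_retire(struct obj *o)
{
        kvfree_rcu(o, rcu);             /* freed after a grace period elapses */
}

static void obj_subsystem_exit(void)
{
        /*
         * Ensure every object handed to kvfree_rcu() above has really
         * been freed before the backing code and data go away.
         */
        kvfree_rcu_barrier();
}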
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 254244202ea9..90a684f94776 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -35,9 +35,10 @@ static inline void rcu_virt_note_context_switch(void)
void synchronize_rcu_expedited(void);
void kvfree_call_rcu(struct rcu_head *head, void *ptr);
+void kvfree_rcu_barrier(void);
void rcu_barrier(void);
-void rcu_momentary_dyntick_idle(void);
+void rcu_momentary_eqs(void);
void kfree_rcu_scheduler_running(void);
bool rcu_gp_might_be_stalled(void);
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 122e38161acb..f9ccad32fc5c 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -1521,6 +1521,9 @@ struct regmap_irq_chip_data;
* struct regmap_irq_chip - Description of a generic regmap irq_chip.
*
* @name: Descriptive name for IRQ controller.
+ * @domain_suffix: Name suffix to be appended to the end of the IRQ domain
+ * name. Needed when multiple regmap-IRQ controllers are created from the
+ * same device.
*
* @main_status: Base main status register address. For chips which have
* interrupts arranged in separate sub-irq blocks with own IRQ
@@ -1606,6 +1609,7 @@ struct regmap_irq_chip_data;
*/
struct regmap_irq_chip {
const char *name;
+ const char *domain_suffix;
unsigned int main_status;
unsigned int num_main_status_bits;
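The new @domain_suffix field matters when a single device registers more than one regmap-IRQ controller; without it both IRQ domains would be derived from the same device name. A sketch of two chip descriptions on one device, with purely illustrative names and the register layout omitted:

#include <linux/regmap.h>

static const struct regmap_irq_chip wakeup_irq_chip = {
        .name           = "my-pmic",
        .domain_suffix  = "wakeup",     /* yields a distinct "my-pmic-wakeup" domain */
        /* status/mask register description omitted in this sketch */
};

static const struct regmap_irq_chip error_irq_chip = {
        .name           = "my-pmic",
        .domain_suffix  = "error",      /* yields a distinct "my-pmic-error" domain */
};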
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index fd35d4ec12e1..17fbb7855295 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -89,6 +89,14 @@ void ring_buffer_discard_commit(struct trace_buffer *buffer,
struct trace_buffer *
__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);
+struct trace_buffer *__ring_buffer_alloc_range(unsigned long size, unsigned flags,
+ int order, unsigned long start,
+ unsigned long range_size,
+ struct lock_class_key *key);
+
+bool ring_buffer_last_boot_delta(struct trace_buffer *buffer, long *text,
+ long *data);
+
/*
* Because the ring buffer is generic, if other users of the ring buffer get
* traced by ftrace, it can produce lockdep warnings. We need to keep each
@@ -100,6 +108,18 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
__ring_buffer_alloc((size), (flags), &__key); \
})
+/*
+ * Because the ring buffer is generic, if other users of the ring buffer get
+ * traced by ftrace, it can produce lockdep warnings. We need to keep each
+ * ring buffer's lock class separate.
+ */
+#define ring_buffer_alloc_range(size, flags, order, start, range_size) \
+({ \
+ static struct lock_class_key __key; \
+ __ring_buffer_alloc_range((size), (flags), (order), (start), \
+ (range_size), &__key); \
+})
+
typedef bool (*ring_buffer_cond_fn)(void *data);
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full,
ring_buffer_cond_fn cond, void *data);
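ring_buffer_alloc_range() mirrors ring_buffer_alloc() but places the buffer over a caller-supplied address range (the building block for boot-persistent trace buffers), and the wrapper macro again gives each call site its own lock class. A hedged sketch, with the reserved region address treated as a stand-in value:

#include <linux/init.h>
#include <linux/ring_buffer.h>
#include <linux/sizes.h>

static struct trace_buffer *persistent_buf;

static int __init my_persistent_trace_init(void)
{
        /* Illustrative: a previously reserved, already mapped memory region. */
        unsigned long start = 0xffff888080000000UL;
        unsigned long range_size = SZ_4M;

        persistent_buf = ring_buffer_alloc_range(SZ_4M, RB_FL_OVERWRITE,
                                                 0 /* page order */,
                                                 start, range_size);
        if (!persistent_buf)
                return -ENOMEM;
        return 0;
}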
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 0978c64f49d8..d5e93e44322e 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -331,7 +331,7 @@ static __always_inline void __folio_dup_file_rmap(struct folio *folio,
switch (level) {
case RMAP_LEVEL_PTE:
if (!folio_test_large(folio)) {
- atomic_inc(&page->_mapcount);
+ atomic_inc(&folio->_mapcount);
break;
}
@@ -425,7 +425,7 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
if (!folio_test_large(folio)) {
if (PageAnonExclusive(page))
ClearPageAnonExclusive(page);
- atomic_inc(&page->_mapcount);
+ atomic_inc(&folio->_mapcount);
break;
}
@@ -745,7 +745,12 @@ int folio_mkclean(struct folio *);
int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
struct vm_area_struct *vma);
-void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked);
+enum rmp_flags {
+ RMP_LOCKED = 1 << 0,
+ RMP_USE_SHARED_ZEROPAGE = 1 << 1,
+};
+
+void remove_migration_ptes(struct folio *src, struct folio *dst, int flags);
/*
* rmap_walk_control: To control rmap traversing for specific needs
diff --git a/include/linux/rpmb.h b/include/linux/rpmb.h
new file mode 100644
index 000000000000..cccda73eea4d
--- /dev/null
+++ b/include/linux/rpmb.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Intel Corp. All rights reserved
+ * Copyright (C) 2021-2022 Linaro Ltd
+ */
+#ifndef __RPMB_H__
+#define __RPMB_H__
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+/**
+ * enum rpmb_type - type of underlying storage technology
+ *
+ * @RPMB_TYPE_EMMC : emmc (JESD84-B50.1)
+ * @RPMB_TYPE_UFS : UFS (JESD220)
+ * @RPMB_TYPE_NVME : NVM Express
+ */
+enum rpmb_type {
+ RPMB_TYPE_EMMC,
+ RPMB_TYPE_UFS,
+ RPMB_TYPE_NVME,
+};
+
+/**
+ * struct rpmb_descr - RPMB description provided by the underlying block device
+ *
+ * @type : block device type
+ * @route_frames : routes frames to and from the RPMB device
+ * @dev_id : unique device identifier read from the hardware
+ * @dev_id_len : length of unique device identifier
+ * @reliable_wr_count: number of sectors that can be written in one access
+ * @capacity : capacity of the device in units of 128K
+ *
+ * @dev_id is intended to be used as input when deriving the authentication key.
+ */
+struct rpmb_descr {
+ enum rpmb_type type;
+ int (*route_frames)(struct device *dev, u8 *req, unsigned int req_len,
+ u8 *resp, unsigned int resp_len);
+ u8 *dev_id;
+ size_t dev_id_len;
+ u16 reliable_wr_count;
+ u16 capacity;
+};
+
+/**
+ * struct rpmb_dev - device which can support RPMB partition
+ *
+ * @dev : device
+ * @id : device_id
+ * @list_node : linked list node
+ * @descr : RPMB description
+ */
+struct rpmb_dev {
+ struct device dev;
+ int id;
+ struct list_head list_node;
+ struct rpmb_descr descr;
+};
+
+#define to_rpmb_dev(x) container_of((x), struct rpmb_dev, dev)
+
+#if IS_ENABLED(CONFIG_RPMB)
+struct rpmb_dev *rpmb_dev_get(struct rpmb_dev *rdev);
+void rpmb_dev_put(struct rpmb_dev *rdev);
+struct rpmb_dev *rpmb_dev_find_device(const void *data,
+ const struct rpmb_dev *start,
+ int (*match)(struct device *dev,
+ const void *data));
+int rpmb_interface_register(struct class_interface *intf);
+void rpmb_interface_unregister(struct class_interface *intf);
+struct rpmb_dev *rpmb_dev_register(struct device *dev,
+ struct rpmb_descr *descr);
+int rpmb_dev_unregister(struct rpmb_dev *rdev);
+
+int rpmb_route_frames(struct rpmb_dev *rdev, u8 *req,
+ unsigned int req_len, u8 *resp, unsigned int resp_len);
+
+#else
+static inline struct rpmb_dev *rpmb_dev_get(struct rpmb_dev *rdev)
+{
+ return NULL;
+}
+
+static inline void rpmb_dev_put(struct rpmb_dev *rdev) { }
+
+static inline struct rpmb_dev *
+rpmb_dev_find_device(const void *data, const struct rpmb_dev *start,
+ int (*match)(struct device *dev, const void *data))
+{
+ return NULL;
+}
+
+static inline int rpmb_interface_register(struct class_interface *intf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void rpmb_interface_unregister(struct class_interface *intf)
+{
+}
+
+static inline struct rpmb_dev *
+rpmb_dev_register(struct device *dev, struct rpmb_descr *descr)
+{
+ return NULL;
+}
+
+static inline int rpmb_dev_unregister(struct rpmb_dev *dev)
+{
+ return 0;
+}
+
+static inline int rpmb_route_frames(struct rpmb_dev *rdev, u8 *req,
+ unsigned int req_len, u8 *resp,
+ unsigned int resp_len)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_RPMB */
+
+#endif /* __RPMB_H__ */
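A storage driver exposes its RPMB partition by filling in a struct rpmb_descr with a frame-routing callback and registering it against the parent device. A hedged sketch built on the interface above (the routing helper and device-id handling are placeholders):

#include <linux/rpmb.h>

/* Placeholder: forward raw RPMB frames to the underlying storage device. */
static int my_route_frames(struct device *dev, u8 *req, unsigned int req_len,
                           u8 *resp, unsigned int resp_len)
{
        /* issue the request frames, then read back the response frames */
        return 0;
}

static struct rpmb_dev *my_register_rpmb(struct device *parent, u8 *cid,
                                         size_t cid_len)
{
        struct rpmb_descr descr = {
                .type                   = RPMB_TYPE_EMMC,
                .route_frames           = my_route_frames,
                .dev_id                 = cid,          /* input for key derivation */
                .dev_id_len             = cid_len,
                .reliable_wr_count      = 1,
                .capacity               = 16,           /* in units of 128K */
        };

        return rpmb_dev_register(parent, &descr);
}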
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index c09cdcc99471..189140bf11fc 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -40,7 +40,7 @@ struct sbitmap_word {
/**
* @swap_lock: serializes simultaneous updates of ->word and ->cleared
*/
- spinlock_t swap_lock;
+ raw_spinlock_t swap_lock;
} ____cacheline_aligned_in_smp;
/**
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f8d150343d42..449dd64ed9ac 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -82,6 +82,8 @@ struct task_group;
struct task_struct;
struct user_event_mm;
+#include <linux/sched/ext.h>
+
/*
* Task state bitmask. NOTE! These bits are also
* encoded in fs/proc/array.c: get_task_state().
@@ -149,8 +151,9 @@ struct user_event_mm;
* Special states are those that do not use the normal wait-loop pattern. See
* the comment with set_special_state().
*/
-#define is_special_task_state(state) \
- ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
+#define is_special_task_state(state) \
+ ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | \
+ TASK_DEAD | TASK_FROZEN))
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define debug_normal_state_change(state_value) \
@@ -541,9 +544,14 @@ struct sched_entity {
struct rb_node run_node;
u64 deadline;
u64 min_vruntime;
+ u64 min_slice;
struct list_head group_node;
- unsigned int on_rq;
+ unsigned char on_rq;
+ unsigned char sched_delayed;
+ unsigned char rel_deadline;
+ unsigned char custom_slice;
+ /* hole */
u64 exec_start;
u64 sum_exec_runtime;
@@ -639,12 +647,26 @@ struct sched_dl_entity {
*
* @dl_overrun tells if the task asked to be informed about runtime
* overruns.
+ *
+ * @dl_server tells if this is a server entity.
+ *
+ * @dl_defer tells if this is a deferred or regular server. For
+ * now, only the deferred server exists.
+ *
+ * @dl_defer_armed tells if the deferrable server is waiting
+ * for the replenishment timer to activate it.
+ *
+ * @dl_defer_running tells if the deferrable server is actually
+ * running, skipping the defer phase.
*/
unsigned int dl_throttled : 1;
unsigned int dl_yielded : 1;
unsigned int dl_non_contending : 1;
unsigned int dl_overrun : 1;
unsigned int dl_server : 1;
+ unsigned int dl_defer : 1;
+ unsigned int dl_defer_armed : 1;
+ unsigned int dl_defer_running : 1;
/*
* Bandwidth enforcement timer. Each -deadline task has its
@@ -672,7 +694,7 @@ struct sched_dl_entity {
*/
struct rq *rq;
dl_server_has_tasks_f server_has_tasks;
- dl_server_pick_f server_pick;
+ dl_server_pick_f server_pick_task;
#ifdef CONFIG_RT_MUTEXES
/*
@@ -810,6 +832,9 @@ struct task_struct {
struct sched_rt_entity rt;
struct sched_dl_entity dl;
struct sched_dl_entity *dl_server;
+#ifdef CONFIG_SCHED_CLASS_EXT
+ struct sched_ext_entity scx;
+#endif
const struct sched_class *sched_class;
#ifdef CONFIG_SCHED_CORE
@@ -1243,7 +1268,6 @@ struct task_struct {
/* Sequence number to catch updates: */
seqcount_spinlock_t mems_allowed_seq;
int cpuset_mem_spread_rotor;
- int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
/* Control Group info protected by css_set_lock: */
@@ -1657,8 +1681,8 @@ extern struct pid *cad_pid;
* I am cleaning dirty pages from some other bdi. */
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
-#define PF_MEMALLOC_NORECLAIM 0x00800000 /* All allocation requests will clear __GFP_DIRECT_RECLAIM */
-#define PF_MEMALLOC_NOWARN 0x01000000 /* All allocation requests will inherit __GFP_NOWARN */
+#define PF__HOLE__00800000 0x00800000
+#define PF__HOLE__01000000 0x01000000
#define PF__HOLE__02000000 0x02000000
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
index df3aca89d4f5..3a912ab42bb5 100644
--- a/include/linux/sched/deadline.h
+++ b/include/linux/sched/deadline.h
@@ -10,16 +10,16 @@
#include <linux/sched.h>
-#define MAX_DL_PRIO 0
-
-static inline int dl_prio(int prio)
+static inline bool dl_prio(int prio)
{
- if (unlikely(prio < MAX_DL_PRIO))
- return 1;
- return 0;
+ return unlikely(prio < MAX_DL_PRIO);
}
-static inline int dl_task(struct task_struct *p)
+/*
+ * Returns true if a task has a priority that belongs to DL class. PI-boosted
+ * tasks will return true. Use dl_policy() to ignore PI-boosted tasks.
+ */
+static inline bool dl_task(struct task_struct *p)
{
return dl_prio(p->prio);
}
diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h
new file mode 100644
index 000000000000..1ddbde64a31b
--- /dev/null
+++ b/include/linux/sched/ext.h
@@ -0,0 +1,215 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
+ *
+ * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
+ * Copyright (c) 2022 David Vernet <dvernet@meta.com>
+ */
+#ifndef _LINUX_SCHED_EXT_H
+#define _LINUX_SCHED_EXT_H
+
+#ifdef CONFIG_SCHED_CLASS_EXT
+
+#include <linux/llist.h>
+#include <linux/rhashtable-types.h>
+
+enum scx_public_consts {
+ SCX_OPS_NAME_LEN = 128,
+
+ SCX_SLICE_DFL = 20 * 1000000, /* 20ms */
+ SCX_SLICE_INF = U64_MAX, /* infinite, implies nohz */
+};
+
+/*
+ * DSQ (dispatch queue) IDs are 64bit of the format:
+ *
+ * Bits: [63] [62 .. 0]
+ * [ B] [ ID ]
+ *
+ * B: 1 for IDs for built-in DSQs, 0 for ops-created user DSQs
+ * ID: 63 bit ID
+ *
+ * Built-in IDs:
+ *
+ * Bits: [63] [62] [61..32] [31 .. 0]
+ * [ 1] [ L] [ R ] [ V ]
+ *
+ * 1: 1 for built-in DSQs.
+ * L: 1 for LOCAL_ON DSQ IDs, 0 for others
+ * V: For LOCAL_ON DSQ IDs, a CPU number. For others, a pre-defined value.
+ */
+enum scx_dsq_id_flags {
+ SCX_DSQ_FLAG_BUILTIN = 1LLU << 63,
+ SCX_DSQ_FLAG_LOCAL_ON = 1LLU << 62,
+
+ SCX_DSQ_INVALID = SCX_DSQ_FLAG_BUILTIN | 0,
+ SCX_DSQ_GLOBAL = SCX_DSQ_FLAG_BUILTIN | 1,
+ SCX_DSQ_LOCAL = SCX_DSQ_FLAG_BUILTIN | 2,
+ SCX_DSQ_LOCAL_ON = SCX_DSQ_FLAG_BUILTIN | SCX_DSQ_FLAG_LOCAL_ON,
+ SCX_DSQ_LOCAL_CPU_MASK = 0xffffffffLLU,
+};
+
+/*
+ * A dispatch queue (DSQ) can be either a FIFO or p->scx.dsq_vtime ordered
+ * queue. A built-in DSQ is always a FIFO. The built-in local DSQs are used to
+ * buffer between the scheduler core and the BPF scheduler. See the
+ * documentation for more details.
+ */
+struct scx_dispatch_q {
+ raw_spinlock_t lock;
+ struct list_head list; /* tasks in dispatch order */
+ struct rb_root priq; /* used to order by p->scx.dsq_vtime */
+ u32 nr;
+ u32 seq; /* used by BPF iter */
+ u64 id;
+ struct rhash_head hash_node;
+ struct llist_node free_node;
+ struct rcu_head rcu;
+};
+
+/* scx_entity.flags */
+enum scx_ent_flags {
+ SCX_TASK_QUEUED = 1 << 0, /* on ext runqueue */
+ SCX_TASK_RESET_RUNNABLE_AT = 1 << 2, /* runnable_at should be reset */
+ SCX_TASK_DEQD_FOR_SLEEP = 1 << 3, /* last dequeue was for SLEEP */
+
+ SCX_TASK_STATE_SHIFT = 8, /* bits 8 and 9 are used to carry scx_task_state */
+ SCX_TASK_STATE_BITS = 2,
+ SCX_TASK_STATE_MASK = ((1 << SCX_TASK_STATE_BITS) - 1) << SCX_TASK_STATE_SHIFT,
+
+ SCX_TASK_CURSOR = 1 << 31, /* iteration cursor, not a task */
+};
+
+/* scx_entity.flags & SCX_TASK_STATE_MASK */
+enum scx_task_state {
+ SCX_TASK_NONE, /* ops.init_task() not called yet */
+ SCX_TASK_INIT, /* ops.init_task() succeeded, but task can be cancelled */
+ SCX_TASK_READY, /* fully initialized, but not in sched_ext */
+ SCX_TASK_ENABLED, /* fully initialized and in sched_ext */
+
+ SCX_TASK_NR_STATES,
+};
+
+/* scx_entity.dsq_flags */
+enum scx_ent_dsq_flags {
+ SCX_TASK_DSQ_ON_PRIQ = 1 << 0, /* task is queued on the priority queue of a dsq */
+};
+
+/*
+ * Mask bits for scx_entity.kf_mask. Not all kfuncs can be called from
+ * everywhere and the following bits track which kfunc sets are currently
+ * allowed for %current. This simple per-task tracking works because SCX ops
+ * nest in a limited way. BPF will likely implement a way to allow and disallow
+ * kfuncs depending on the calling context which will replace this manual
+ * mechanism. See scx_kf_allow().
+ */
+enum scx_kf_mask {
+ SCX_KF_UNLOCKED = 0, /* sleepable and not rq locked */
+ /* ENQUEUE and DISPATCH may be nested inside CPU_RELEASE */
+ SCX_KF_CPU_RELEASE = 1 << 0, /* ops.cpu_release() */
+ /* ops.dequeue (in REST) may be nested inside DISPATCH */
+ SCX_KF_DISPATCH = 1 << 1, /* ops.dispatch() */
+ SCX_KF_ENQUEUE = 1 << 2, /* ops.enqueue() and ops.select_cpu() */
+ SCX_KF_SELECT_CPU = 1 << 3, /* ops.select_cpu() */
+ SCX_KF_REST = 1 << 4, /* other rq-locked operations */
+
+ __SCX_KF_RQ_LOCKED = SCX_KF_CPU_RELEASE | SCX_KF_DISPATCH |
+ SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU | SCX_KF_REST,
+ __SCX_KF_TERMINAL = SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU | SCX_KF_REST,
+};
+
+enum scx_dsq_lnode_flags {
+ SCX_DSQ_LNODE_ITER_CURSOR = 1 << 0,
+
+ /* high 16 bits can be for iter cursor flags */
+ __SCX_DSQ_LNODE_PRIV_SHIFT = 16,
+};
+
+struct scx_dsq_list_node {
+ struct list_head node;
+ u32 flags;
+ u32 priv; /* can be used by iter cursor */
+};
+
+/*
+ * The following is embedded in task_struct and contains all fields necessary
+ * for a task to be scheduled by SCX.
+ */
+struct sched_ext_entity {
+ struct scx_dispatch_q *dsq;
+ struct scx_dsq_list_node dsq_list; /* dispatch order */
+ struct rb_node dsq_priq; /* p->scx.dsq_vtime order */
+ u32 dsq_seq;
+ u32 dsq_flags; /* protected by DSQ lock */
+ u32 flags; /* protected by rq lock */
+ u32 weight;
+ s32 sticky_cpu;
+ s32 holding_cpu;
+ u32 kf_mask; /* see scx_kf_mask above */
+ struct task_struct *kf_tasks[2]; /* see SCX_CALL_OP_TASK() */
+ atomic_long_t ops_state;
+
+ struct list_head runnable_node; /* rq->scx.runnable_list */
+ unsigned long runnable_at;
+
+#ifdef CONFIG_SCHED_CORE
+ u64 core_sched_at; /* see scx_prio_less() */
+#endif
+ u64 ddsp_dsq_id;
+ u64 ddsp_enq_flags;
+
+ /* BPF scheduler modifiable fields */
+
+ /*
+ * Runtime budget in nsecs. This is usually set through
+ * scx_bpf_dispatch() but can also be modified directly by the BPF
+ * scheduler. Automatically decreased by SCX as the task executes. On
+ * depletion, a scheduling event is triggered.
+ *
+ * This value is cleared to zero if the task is preempted by
+ * %SCX_KICK_PREEMPT and shouldn't be used to determine how long the
+ * task ran. Use p->se.sum_exec_runtime instead.
+ */
+ u64 slice;
+
+ /*
+ * Used to order tasks when dispatching to the vtime-ordered priority
+ * queue of a dsq. This is usually set through scx_bpf_dispatch_vtime()
+ * but can also be modified directly by the BPF scheduler. Modifying it
+ * while a task is queued on a dsq may mangle the ordering and is not
+ * recommended.
+ */
+ u64 dsq_vtime;
+
+ /*
+ * If set, reject future sched_setscheduler(2) calls updating the policy
+ * to %SCHED_EXT with -%EACCES.
+ *
+ * Can be set from ops.init_task() while the BPF scheduler is being
+ * loaded (!scx_init_task_args->fork). If set and the task's policy is
+ * already %SCHED_EXT, the task's policy is rejected and forcefully
+ * reverted to %SCHED_NORMAL. The number of such events are reported
+ * through /sys/kernel/debug/sched_ext::nr_rejected. Setting this flag
+ * during fork is not allowed.
+ */
+ bool disallow; /* reject switching into SCX */
+
+ /* cold fields */
+#ifdef CONFIG_EXT_GROUP_SCHED
+ struct cgroup *cgrp_moving_from;
+#endif
+ /* must be the last field, see init_scx_entity() */
+ struct list_head tasks_node;
+};
+
+void sched_ext_free(struct task_struct *p);
+void print_scx_info(const char *log_lvl, struct task_struct *p);
+
+#else /* !CONFIG_SCHED_CLASS_EXT */
+
+static inline void sched_ext_free(struct task_struct *p) {}
+static inline void print_scx_info(const char *log_lvl, struct task_struct *p) {}
+
+#endif /* CONFIG_SCHED_CLASS_EXT */
+#endif /* _LINUX_SCHED_EXT_H */
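The DSQ ID layout documented above packs the built-in flag, the LOCAL_ON flag, and a CPU number into a single u64. A small illustrative sketch of composing and decomposing a per-CPU LOCAL_ON ID (these helpers are not part of the header):

#include <linux/sched/ext.h>

/* Build the DSQ ID addressing CPU @cpu's local queue. */
static inline u64 scx_local_on_dsq_id(s32 cpu)
{
        return SCX_DSQ_LOCAL_ON | (u32)cpu;
}

/* Recover the CPU number from a LOCAL_ON DSQ ID. */
static inline s32 scx_dsq_id_to_cpu(u64 dsq_id)
{
        return (s32)(dsq_id & SCX_DSQ_LOCAL_CPU_MASK);
}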
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 91546493c43d..928a626725e6 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -179,27 +179,20 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
extern void arch_pick_mmap_layout(struct mm_struct *mm,
struct rlimit *rlim_stack);
-extern unsigned long
-arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
- unsigned long, unsigned long);
-extern unsigned long
+
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags, vm_flags_t vm_flags);
+unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags);
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags, vm_flags_t);
unsigned long mm_get_unmapped_area(struct mm_struct *mm, struct file *filp,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags);
-unsigned long
-arch_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags, vm_flags_t vm_flags);
-unsigned long
-arch_get_unmapped_area_topdown_vmflags(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags, vm_flags_t);
-
unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm,
struct file *filp,
unsigned long addr,
@@ -211,11 +204,11 @@ unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm,
unsigned long
generic_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags);
+ unsigned long flags, vm_flags_t vm_flags);
unsigned long
generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags);
+ unsigned long flags, vm_flags_t vm_flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
struct rlimit *rlim_stack) {}
@@ -258,25 +251,16 @@ static inline gfp_t current_gfp_context(gfp_t flags)
{
unsigned int pflags = READ_ONCE(current->flags);
- if (unlikely(pflags & (PF_MEMALLOC_NOIO |
- PF_MEMALLOC_NOFS |
- PF_MEMALLOC_NORECLAIM |
- PF_MEMALLOC_NOWARN |
- PF_MEMALLOC_PIN))) {
+ if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_PIN))) {
/*
- * Stronger flags before weaker flags:
- * NORECLAIM implies NOIO, which in turn implies NOFS
+ * NOIO implies both NOIO and NOFS and it is a weaker context,
+ * so always make sure it takes precedence
*/
- if (pflags & PF_MEMALLOC_NORECLAIM)
- flags &= ~__GFP_DIRECT_RECLAIM;
- else if (pflags & PF_MEMALLOC_NOIO)
+ if (pflags & PF_MEMALLOC_NOIO)
flags &= ~(__GFP_IO | __GFP_FS);
else if (pflags & PF_MEMALLOC_NOFS)
flags &= ~__GFP_FS;
- if (pflags & PF_MEMALLOC_NOWARN)
- flags |= __GFP_NOWARN;
-
if (pflags & PF_MEMALLOC_PIN)
flags &= ~__GFP_MOVABLE;
}
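current_gfp_context() is what makes the memalloc_*_save()/restore() scopes effective: allocations issued inside the scope have the corresponding GFP bits masked off for them. A hedged sketch of the NOIO scope that the simplified masking above still serves:

#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *alloc_in_reclaim_sensitive_path(size_t len)
{
        unsigned int noio_flags;
        void *p;

        noio_flags = memalloc_noio_save();      /* sets PF_MEMALLOC_NOIO on current */
        /*
         * GFP_KERNEL is passed, but current_gfp_context() strips __GFP_IO
         * and __GFP_FS from it for the duration of the scope.
         */
        p = kmalloc(len, GFP_KERNEL);
        memalloc_noio_restore(noio_flags);

        return p;
}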
diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
index ab83d85e1183..6ab43b4f72f9 100644
--- a/include/linux/sched/prio.h
+++ b/include/linux/sched/prio.h
@@ -14,6 +14,7 @@
*/
#define MAX_RT_PRIO 100
+#define MAX_DL_PRIO 0
#define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
#define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index b2b9e6eb9683..4e3338103654 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -6,19 +6,40 @@
struct task_struct;
-static inline int rt_prio(int prio)
+static inline bool rt_prio(int prio)
{
- if (unlikely(prio < MAX_RT_PRIO))
- return 1;
- return 0;
+ return unlikely(prio < MAX_RT_PRIO && prio >= MAX_DL_PRIO);
}
-static inline int rt_task(struct task_struct *p)
+static inline bool rt_or_dl_prio(int prio)
+{
+ return unlikely(prio < MAX_RT_PRIO);
+}
+
+/*
+ * Returns true if a task has a priority that belongs to RT class. PI-boosted
+ * tasks will return true. Use rt_policy() to ignore PI-boosted tasks.
+ */
+static inline bool rt_task(struct task_struct *p)
{
return rt_prio(p->prio);
}
-static inline bool task_is_realtime(struct task_struct *tsk)
+/*
+ * Returns true if a task has a priority that belongs to RT or DL classes.
+ * PI-boosted tasks will return true. Use rt_or_dl_task_policy() to ignore
+ * PI-boosted tasks.
+ */
+static inline bool rt_or_dl_task(struct task_struct *p)
+{
+ return rt_or_dl_prio(p->prio);
+}
+
+/*
+ * Returns true if a task has a policy that belongs to RT or DL classes.
+ * PI-boosted tasks will return false.
+ */
+static inline bool rt_or_dl_task_policy(struct task_struct *tsk)
{
int policy = tsk->policy;
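With MAX_DL_PRIO now coming from sched/prio.h, the helpers above cleanly partition the priority space: deadline priorities sit below 0, RT priorities occupy [0, MAX_RT_PRIO), and everything else is normal. A tiny illustration (classification only, the values are examples):

#include <linux/sched/deadline.h>
#include <linux/sched/rt.h>

/*
 * Example values:
 *   prio = -1   -> dl_prio() true,  rt_prio() false, rt_or_dl_prio() true
 *   prio = 10   -> dl_prio() false, rt_prio() true,  rt_or_dl_prio() true
 *   prio = 120  -> all three false (normal priority range)
 */
static const char *prio_class_name(int prio)
{
        if (dl_prio(prio))
                return "deadline";
        if (rt_prio(prio))
                return "realtime";
        return "normal";
}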
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 0a0e23c45406..c8ed09ac29ac 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -137,7 +137,7 @@ struct signal_struct {
/* POSIX.1b Interval Timers */
unsigned int next_posix_timer_id;
- struct list_head posix_timers;
+ struct hlist_head posix_timers;
/* ITIMER_REAL timer for the process */
struct hrtimer real_timer;
@@ -276,8 +276,7 @@ static inline void signal_set_stop_flags(struct signal_struct *sig,
extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
-extern int dequeue_signal(struct task_struct *task, sigset_t *mask,
- kernel_siginfo_t *info, enum pid_type *type);
+extern int dequeue_signal(sigset_t *mask, kernel_siginfo_t *info, enum pid_type *type);
static inline int kernel_dequeue_signal(void)
{
@@ -287,7 +286,7 @@ static inline int kernel_dequeue_signal(void)
int ret;
spin_lock_irq(&task->sighand->siglock);
- ret = dequeue_signal(task, &task->blocked, &__info, &__type);
+ ret = dequeue_signal(&task->blocked, &__info, &__type);
spin_unlock_irq(&task->sighand->siglock);
return ret;
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index d362aacf9f89..0f2aeb37bbb0 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -63,7 +63,8 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
-extern void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
+extern int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
+extern void sched_cancel_fork(struct task_struct *p);
extern void sched_post_fork(struct task_struct *p);
extern void sched_dead(struct task_struct *p);
@@ -119,6 +120,11 @@ static inline struct task_struct *get_task_struct(struct task_struct *t)
return t;
}
+static inline struct task_struct *tryget_task_struct(struct task_struct *t)
+{
+ return refcount_inc_not_zero(&t->usage) ? t : NULL;
+}
+
extern void __put_task_struct(struct task_struct *t);
extern void __put_task_struct_rcu_cb(struct rcu_head *rhp);
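tryget_task_struct() is the right form when the reference count may already have dropped to zero, for example when a task pointer was looked up under RCU. A hedged sketch of that pattern:

#include <linux/pid.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>

/* Take a reference on the task owning @pid, or return NULL if it is exiting. */
static struct task_struct *grab_task_by_pid(struct pid *pid)
{
        struct task_struct *p;

        rcu_read_lock();
        p = pid_task(pid, PIDTYPE_PID);
        if (p)
                p = tryget_task_struct(p);      /* NULL if usage already hit zero */
        rcu_read_unlock();

        return p;       /* caller drops it with put_task_struct() */
}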
diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
index ccd72b978e1f..bf10bdb487dd 100644
--- a/include/linux/sched/task_stack.h
+++ b/include/linux/sched/task_stack.h
@@ -95,23 +95,11 @@ static inline int object_is_on_stack(const void *obj)
extern void thread_stack_cache_init(void);
#ifdef CONFIG_DEBUG_STACK_USAGE
+unsigned long stack_not_used(struct task_struct *p);
+#else
static inline unsigned long stack_not_used(struct task_struct *p)
{
- unsigned long *n = end_of_stack(p);
-
- do { /* Skip over canary */
-# ifdef CONFIG_STACK_GROWSUP
- n--;
-# else
- n++;
-# endif
- } while (!*n);
-
-# ifdef CONFIG_STACK_GROWSUP
- return (unsigned long)end_of_stack(p) - (unsigned long)n;
-# else
- return (unsigned long)n - (unsigned long)end_of_stack(p);
-# endif
+ return 0;
}
#endif
extern void set_task_stack_end_magic(struct task_struct *tsk);
diff --git a/include/linux/scmi_imx_protocol.h b/include/linux/scmi_imx_protocol.h
new file mode 100644
index 000000000000..066216f1357a
--- /dev/null
+++ b/include/linux/scmi_imx_protocol.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * SCMI Message Protocol driver NXP extension header
+ *
+ * Copyright 2024 NXP.
+ */
+
+#ifndef _LINUX_SCMI_NXP_PROTOCOL_H
+#define _LINUX_SCMI_NXP_PROTOCOL_H
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/types.h>
+
+enum scmi_nxp_protocol {
+ SCMI_PROTOCOL_IMX_BBM = 0x81,
+ SCMI_PROTOCOL_IMX_MISC = 0x84,
+};
+
+struct scmi_imx_bbm_proto_ops {
+ int (*rtc_time_set)(const struct scmi_protocol_handle *ph, u32 id,
+ uint64_t sec);
+ int (*rtc_time_get)(const struct scmi_protocol_handle *ph, u32 id,
+ u64 *val);
+ int (*rtc_alarm_set)(const struct scmi_protocol_handle *ph, u32 id,
+ bool enable, u64 sec);
+ int (*button_get)(const struct scmi_protocol_handle *ph, u32 *state);
+};
+
+enum scmi_nxp_notification_events {
+ SCMI_EVENT_IMX_BBM_RTC = 0x0,
+ SCMI_EVENT_IMX_BBM_BUTTON = 0x1,
+ SCMI_EVENT_IMX_MISC_CONTROL = 0x0,
+};
+
+struct scmi_imx_bbm_notif_report {
+ bool is_rtc;
+ bool is_button;
+ ktime_t timestamp;
+ unsigned int rtc_id;
+ unsigned int rtc_evt;
+};
+
+struct scmi_imx_misc_ctrl_notify_report {
+ ktime_t timestamp;
+ unsigned int ctrl_id;
+ unsigned int flags;
+};
+
+struct scmi_imx_misc_proto_ops {
+ int (*misc_ctrl_set)(const struct scmi_protocol_handle *ph, u32 id,
+ u32 num, u32 *val);
+ int (*misc_ctrl_get)(const struct scmi_protocol_handle *ph, u32 id,
+ u32 *num, u32 *val);
+ int (*misc_ctrl_req_notify)(const struct scmi_protocol_handle *ph,
+ u32 ctrl_id, u32 evt_id, u32 flags);
+};
+#endif
diff --git a/include/linux/security.h b/include/linux/security.h
index 1390f1efb4f0..2ec8f3014757 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -83,6 +83,18 @@ enum lsm_event {
LSM_POLICY_CHANGE,
};
+struct dm_verity_digest {
+ const char *alg;
+ const u8 *digest;
+ size_t digest_len;
+};
+
+enum lsm_integrity_type {
+ LSM_INT_DMVERITY_SIG_VALID,
+ LSM_INT_DMVERITY_ROOTHASH,
+ LSM_INT_FSVERITY_BUILTINSIG_VALID,
+};
+
/*
* These are reasons that can be passed to the security_locked_down()
* LSM hook. Lockdown reasons that protect kernel integrity (ie, the
@@ -336,7 +348,7 @@ int security_dentry_create_files_as(struct dentry *dentry, int mode,
struct cred *new);
int security_path_notify(const struct path *path, u64 mask,
unsigned int obj_type);
-int security_inode_alloc(struct inode *inode);
+int security_inode_alloc(struct inode *inode, gfp_t gfp);
void security_inode_free(struct inode *inode);
int security_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr,
@@ -399,6 +411,9 @@ int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer
void security_inode_getsecid(struct inode *inode, u32 *secid);
int security_inode_copy_up(struct dentry *src, struct cred **new);
int security_inode_copy_up_xattr(struct dentry *src, const char *name);
+int security_inode_setintegrity(const struct inode *inode,
+ enum lsm_integrity_type type, const void *value,
+ size_t size);
int security_kernfs_init_security(struct kernfs_node *kn_dir,
struct kernfs_node *kn);
int security_file_permission(struct file *file, int mask);
@@ -509,6 +524,11 @@ int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen);
int security_locked_down(enum lockdown_reason what);
int lsm_fill_user_ctx(struct lsm_ctx __user *uctx, u32 *uctx_len,
void *val, size_t val_len, u64 id, u64 flags);
+int security_bdev_alloc(struct block_device *bdev);
+void security_bdev_free(struct block_device *bdev);
+int security_bdev_setintegrity(struct block_device *bdev,
+ enum lsm_integrity_type type, const void *value,
+ size_t size);
#else /* CONFIG_SECURITY */
static inline int call_blocking_lsm_notifier(enum lsm_event event, void *data)
@@ -634,7 +654,7 @@ static inline int security_settime64(const struct timespec64 *ts,
static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
- return __vm_enough_memory(mm, pages, cap_vm_enough_memory(mm, pages));
+ return __vm_enough_memory(mm, pages, !cap_vm_enough_memory(mm, pages));
}
static inline int security_bprm_creds_for_exec(struct linux_binprm *bprm)
@@ -769,7 +789,7 @@ static inline int security_path_notify(const struct path *path, u64 mask,
return 0;
}
-static inline int security_inode_alloc(struct inode *inode)
+static inline int security_inode_alloc(struct inode *inode, gfp_t gfp)
{
return 0;
}
@@ -1010,6 +1030,13 @@ static inline int security_inode_copy_up(struct dentry *src, struct cred **new)
return 0;
}
+static inline int security_inode_setintegrity(const struct inode *inode,
+ enum lsm_integrity_type type,
+ const void *value, size_t size)
+{
+ return 0;
+}
+
static inline int security_kernfs_init_security(struct kernfs_node *kn_dir,
struct kernfs_node *kn)
{
@@ -1483,6 +1510,23 @@ static inline int lsm_fill_user_ctx(struct lsm_ctx __user *uctx,
{
return -EOPNOTSUPP;
}
+
+static inline int security_bdev_alloc(struct block_device *bdev)
+{
+ return 0;
+}
+
+static inline void security_bdev_free(struct block_device *bdev)
+{
+}
+
+static inline int security_bdev_setintegrity(struct block_device *bdev,
+ enum lsm_integrity_type type,
+ const void *value, size_t size)
+{
+ return 0;
+}
+
#endif /* CONFIG_SECURITY */
#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
@@ -2090,6 +2134,7 @@ struct dentry *securityfs_create_symlink(const char *name,
const char *target,
const struct inode_operations *iops);
extern void securityfs_remove(struct dentry *dentry);
+extern void securityfs_recursive_remove(struct dentry *dentry);
#else /* CONFIG_SECURITYFS */
@@ -2137,7 +2182,7 @@ extern int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
struct bpf_token *token);
extern void security_bpf_prog_free(struct bpf_prog *prog);
extern int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
- struct path *path);
+ const struct path *path);
extern void security_bpf_token_free(struct bpf_token *token);
extern int security_bpf_token_cmd(const struct bpf_token *token, enum bpf_cmd cmd);
extern int security_bpf_token_capable(const struct bpf_token *token, int cap);
@@ -2177,7 +2222,7 @@ static inline void security_bpf_prog_free(struct bpf_prog *prog)
{ }
static inline int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
- struct path *path)
+ const struct path *path)
{
return 0;
}
@@ -2256,4 +2301,12 @@ static inline int security_uring_cmd(struct io_uring_cmd *ioucmd)
#endif /* CONFIG_SECURITY */
#endif /* CONFIG_IO_URING */
+#ifdef CONFIG_SECURITY
+extern void security_initramfs_populated(void);
+#else
+static inline void security_initramfs_populated(void)
+{
+}
+#endif /* CONFIG_SECURITY */
+
#endif /* ! __LINUX_SECURITY_H */
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index d90d8ee29d81..fffeb754880f 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -157,7 +157,7 @@ __seqprop_##lockname##_const_ptr(const seqcount_##lockname##_t *s) \
static __always_inline unsigned \
__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
{ \
- unsigned seq = READ_ONCE(s->seqcount.sequence); \
+ unsigned seq = smp_load_acquire(&s->seqcount.sequence); \
\
if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
return seq; \
@@ -170,7 +170,7 @@ __seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
* Re-read the sequence counter since the (possibly \
* preempted) writer made progress. \
*/ \
- seq = READ_ONCE(s->seqcount.sequence); \
+ seq = smp_load_acquire(&s->seqcount.sequence); \
} \
\
return seq; \
@@ -208,7 +208,7 @@ static inline const seqcount_t *__seqprop_const_ptr(const seqcount_t *s)
static inline unsigned __seqprop_sequence(const seqcount_t *s)
{
- return READ_ONCE(s->sequence);
+ return smp_load_acquire(&s->sequence);
}
static inline bool __seqprop_preemptible(const seqcount_t *s)
@@ -263,17 +263,9 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
#define seqprop_assert(s) __seqprop(s, assert)(s)
/**
- * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
+ * __read_seqcount_begin() - begin a seqcount_t read section
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
- * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
- * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
- * provided before actually loading any of the variables that are to be
- * protected in this critical section.
- *
- * Use carefully, only in critical code, and comment how the barrier is
- * provided.
- *
* Return: count to be passed to read_seqcount_retry()
*/
#define __read_seqcount_begin(s) \
@@ -293,13 +285,7 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
*
* Return: count to be passed to read_seqcount_retry()
*/
-#define raw_read_seqcount_begin(s) \
-({ \
- unsigned _seq = __read_seqcount_begin(s); \
- \
- smp_rmb(); \
- _seq; \
-})
+#define raw_read_seqcount_begin(s) __read_seqcount_begin(s)
/**
* read_seqcount_begin() - begin a seqcount_t read critical section
@@ -328,7 +314,6 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
({ \
unsigned __seq = seqprop_sequence(s); \
\
- smp_rmb(); \
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
__seq; \
})
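Moving the ordering into smp_load_acquire() does not change the caller pattern: sample the count, read the protected data, and retry if a writer intervened. A hedged sketch of a reader/writer pair on a plain seqcount_t:

#include <linux/preempt.h>
#include <linux/seqlock.h>

static seqcount_t cfg_seq = SEQCNT_ZERO(cfg_seq);
static int cfg_a, cfg_b;

/* Reader: obtain a consistent snapshot of cfg_a and cfg_b. */
static void cfg_read(int *a, int *b)
{
        unsigned int seq;

        do {
                seq = read_seqcount_begin(&cfg_seq);
                *a = cfg_a;
                *b = cfg_b;
        } while (read_seqcount_retry(&cfg_seq, seq));
}

/* Writer: writers must already be serialized against each other. */
static void cfg_write(int a, int b)
{
        preempt_disable();      /* plain seqcount_t writers must not be preempted */
        write_seqcount_begin(&cfg_seq);
        cfg_a = a;
        cfg_b = b;
        write_seqcount_end(&cfg_seq);
        preempt_enable();
}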
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index fd59ed2cca53..e0717c8393d7 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -193,7 +193,7 @@ void serial8250_do_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate);
void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl);
void serial8250_do_set_divisor(struct uart_port *port, unsigned int baud,
- unsigned int quot, unsigned int quot_frac);
+ unsigned int quot);
int fsl8250_handle_irq(struct uart_port *port);
int serial8250_handle_irq(struct uart_port *port, unsigned int iir);
u16 serial8250_rx_chars(struct uart_8250_port *up, u16 lsr);
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index aea25eef9a1a..4ab65874a850 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -11,6 +11,8 @@
#include <linux/compiler.h>
#include <linux/console.h>
#include <linux/interrupt.h>
+#include <linux/lockdep.h>
+#include <linux/printk.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/tty.h>
@@ -590,6 +592,95 @@ struct uart_port {
void *private_data; /* generic platform data pointer */
};
+/*
+ * Only for console->device_lock()/_unlock() callbacks and internal
+ * port lock wrapper synchronization.
+ */
+static inline void __uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
+{
+ spin_lock_irqsave(&up->lock, *flags);
+}
+
+/*
+ * Only for console->device_lock()/_unlock() callbacks and internal
+ * port lock wrapper synchronization.
+ */
+static inline void __uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
+{
+ spin_unlock_irqrestore(&up->lock, flags);
+}
+
+/**
+ * uart_port_set_cons - Safely set the @cons field for a uart
+ * @up: The uart port to set
+ * @con: The new console to set to
+ *
+ * This function must be used to set @up->cons. It uses the port lock to
+ * synchronize with the port lock wrappers in order to ensure that the console
+ * cannot change or disappear while another context is holding the port lock.
+ */
+static inline void uart_port_set_cons(struct uart_port *up, struct console *con)
+{
+ unsigned long flags;
+
+ __uart_port_lock_irqsave(up, &flags);
+ up->cons = con;
+ __uart_port_unlock_irqrestore(up, flags);
+}
+
+/* Only for internal port lock wrapper usage. */
+static inline bool __uart_port_using_nbcon(struct uart_port *up)
+{
+ lockdep_assert_held_once(&up->lock);
+
+ if (likely(!uart_console(up)))
+ return false;
+
+ /*
+ * @up->cons is only modified under the port lock. Therefore it is
+ * certain that it cannot disappear here.
+ *
+ * @up->cons->node is added/removed from the console list under the
+ * port lock. Therefore it is certain that the registration status
+ * cannot change here, thus @up->cons->flags can be read directly.
+ */
+ if (hlist_unhashed_lockless(&up->cons->node) ||
+ !(up->cons->flags & CON_NBCON) ||
+ !up->cons->write_atomic) {
+ return false;
+ }
+
+ return true;
+}
+
+/* Only for internal port lock wrapper usage. */
+static inline bool __uart_port_nbcon_try_acquire(struct uart_port *up)
+{
+ if (!__uart_port_using_nbcon(up))
+ return true;
+
+ return nbcon_device_try_acquire(up->cons);
+}
+
+/* Only for internal port lock wrapper usage. */
+static inline void __uart_port_nbcon_acquire(struct uart_port *up)
+{
+ if (!__uart_port_using_nbcon(up))
+ return;
+
+ while (!nbcon_device_try_acquire(up->cons))
+ cpu_relax();
+}
+
+/* Only for internal port lock wrapper usage. */
+static inline void __uart_port_nbcon_release(struct uart_port *up)
+{
+ if (!__uart_port_using_nbcon(up))
+ return;
+
+ nbcon_device_release(up->cons);
+}
+
/**
* uart_port_lock - Lock the UART port
* @up: Pointer to UART port structure
@@ -597,6 +688,7 @@ struct uart_port {
static inline void uart_port_lock(struct uart_port *up)
{
spin_lock(&up->lock);
+ __uart_port_nbcon_acquire(up);
}
/**
@@ -606,6 +698,7 @@ static inline void uart_port_lock(struct uart_port *up)
static inline void uart_port_lock_irq(struct uart_port *up)
{
spin_lock_irq(&up->lock);
+ __uart_port_nbcon_acquire(up);
}
/**
@@ -616,6 +709,7 @@ static inline void uart_port_lock_irq(struct uart_port *up)
static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
{
spin_lock_irqsave(&up->lock, *flags);
+ __uart_port_nbcon_acquire(up);
}
/**
@@ -626,7 +720,15 @@ static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *f
*/
static inline bool uart_port_trylock(struct uart_port *up)
{
- return spin_trylock(&up->lock);
+ if (!spin_trylock(&up->lock))
+ return false;
+
+ if (!__uart_port_nbcon_try_acquire(up)) {
+ spin_unlock(&up->lock);
+ return false;
+ }
+
+ return true;
}
/**
@@ -638,7 +740,15 @@ static inline bool uart_port_trylock(struct uart_port *up)
*/
static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags)
{
- return spin_trylock_irqsave(&up->lock, *flags);
+ if (!spin_trylock_irqsave(&up->lock, *flags))
+ return false;
+
+ if (!__uart_port_nbcon_try_acquire(up)) {
+ spin_unlock_irqrestore(&up->lock, *flags);
+ return false;
+ }
+
+ return true;
}
/**
@@ -647,6 +757,7 @@ static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long
*/
static inline void uart_port_unlock(struct uart_port *up)
{
+ __uart_port_nbcon_release(up);
spin_unlock(&up->lock);
}
@@ -656,6 +767,7 @@ static inline void uart_port_unlock(struct uart_port *up)
*/
static inline void uart_port_unlock_irq(struct uart_port *up)
{
+ __uart_port_nbcon_release(up);
spin_unlock_irq(&up->lock);
}
@@ -666,6 +778,7 @@ static inline void uart_port_unlock_irq(struct uart_port *up)
*/
static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
{
+ __uart_port_nbcon_release(up);
spin_unlock_irqrestore(&up->lock, flags);
}
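Drivers see no API change: they keep using the uart_port_lock wrappers, and the nbcon console acquire/release now rides along inside them. A hedged sketch of a typical interrupt handler built on the wrappers:

#include <linux/interrupt.h>
#include <linux/serial_core.h>

static irqreturn_t my_uart_irq(int irq, void *dev_id)
{
        struct uart_port *port = dev_id;
        unsigned long flags;

        if (!uart_port_trylock_irqsave(port, &flags))
                return IRQ_NONE;        /* port owned by another context, e.g. an nbcon console */

        /* ... drain the RX FIFO, push characters to the tty layer ... */

        uart_port_unlock_irqrestore(port, flags);
        return IRQ_HANDLED;
}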
diff --git a/include/linux/serial_s3c.h b/include/linux/serial_s3c.h
index 1672cf0810ef..102aa33d956c 100644
--- a/include/linux/serial_s3c.h
+++ b/include/linux/serial_s3c.h
@@ -246,24 +246,28 @@
S5PV210_UFCON_TXTRIG4 | \
S5PV210_UFCON_RXTRIG4)
-#define APPLE_S5L_UCON_RXTO_ENA 9
-#define APPLE_S5L_UCON_RXTHRESH_ENA 12
-#define APPLE_S5L_UCON_TXTHRESH_ENA 13
-#define APPLE_S5L_UCON_RXTO_ENA_MSK (1 << APPLE_S5L_UCON_RXTO_ENA)
-#define APPLE_S5L_UCON_RXTHRESH_ENA_MSK (1 << APPLE_S5L_UCON_RXTHRESH_ENA)
-#define APPLE_S5L_UCON_TXTHRESH_ENA_MSK (1 << APPLE_S5L_UCON_TXTHRESH_ENA)
+#define APPLE_S5L_UCON_RXTO_ENA 9
+#define APPLE_S5L_UCON_RXTO_LEGACY_ENA 11
+#define APPLE_S5L_UCON_RXTHRESH_ENA 12
+#define APPLE_S5L_UCON_TXTHRESH_ENA 13
+#define APPLE_S5L_UCON_RXTO_ENA_MSK BIT(APPLE_S5L_UCON_RXTO_ENA)
+#define APPLE_S5L_UCON_RXTO_LEGACY_ENA_MSK BIT(APPLE_S5L_UCON_RXTO_LEGACY_ENA)
+#define APPLE_S5L_UCON_RXTHRESH_ENA_MSK BIT(APPLE_S5L_UCON_RXTHRESH_ENA)
+#define APPLE_S5L_UCON_TXTHRESH_ENA_MSK BIT(APPLE_S5L_UCON_TXTHRESH_ENA)
#define APPLE_S5L_UCON_DEFAULT (S3C2410_UCON_TXIRQMODE | \
S3C2410_UCON_RXIRQMODE | \
S3C2410_UCON_RXFIFO_TOI)
#define APPLE_S5L_UCON_MASK (APPLE_S5L_UCON_RXTO_ENA_MSK | \
+ APPLE_S5L_UCON_RXTO_LEGACY_ENA_MSK | \
APPLE_S5L_UCON_RXTHRESH_ENA_MSK | \
APPLE_S5L_UCON_TXTHRESH_ENA_MSK)
-#define APPLE_S5L_UTRSTAT_RXTHRESH (1<<4)
-#define APPLE_S5L_UTRSTAT_TXTHRESH (1<<5)
-#define APPLE_S5L_UTRSTAT_RXTO (1<<9)
-#define APPLE_S5L_UTRSTAT_ALL_FLAGS (0x3f0)
+#define APPLE_S5L_UTRSTAT_RXTO_LEGACY BIT(3)
+#define APPLE_S5L_UTRSTAT_RXTHRESH BIT(4)
+#define APPLE_S5L_UTRSTAT_TXTHRESH BIT(5)
+#define APPLE_S5L_UTRSTAT_RXTO BIT(9)
+#define APPLE_S5L_UTRSTAT_ALL_FLAGS GENMASK(9, 3)
#ifndef __ASSEMBLY__
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
index 95ac8398ee72..e7aec20fb44f 100644
--- a/include/linux/set_memory.h
+++ b/include/linux/set_memory.h
@@ -8,10 +8,10 @@
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
#include <asm/set_memory.h>
#else
-static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
-static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
-static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
-static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
+static inline int __must_check set_memory_ro(unsigned long addr, int numpages) { return 0; }
+static inline int __must_check set_memory_rw(unsigned long addr, int numpages) { return 0; }
+static inline int __must_check set_memory_x(unsigned long addr, int numpages) { return 0; }
+static inline int __must_check set_memory_nx(unsigned long addr, int numpages) { return 0; }
#endif
#ifndef set_memory_rox
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 1d06b1e5408a..515a9a6a3c6f 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -111,20 +111,13 @@ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
int shmem_unuse(unsigned int type);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
- struct mm_struct *mm, unsigned long vm_flags);
unsigned long shmem_allowable_huge_orders(struct inode *inode,
struct vm_area_struct *vma, pgoff_t index,
- bool global_huge);
+ loff_t write_end, bool shmem_huge_force);
#else
-static __always_inline bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
- struct mm_struct *mm, unsigned long vm_flags)
-{
- return false;
-}
static inline unsigned long shmem_allowable_huge_orders(struct inode *inode,
struct vm_area_struct *vma, pgoff_t index,
- bool global_huge)
+ loff_t write_end, bool shmem_huge_force)
{
return 0;
}
@@ -150,8 +143,8 @@ enum sgp_type {
SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */
};
-int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
- enum sgp_type sgp);
+int shmem_get_folio(struct inode *inode, pgoff_t index, loff_t write_end,
+ struct folio **foliop, enum sgp_type sgp);
struct folio *shmem_read_folio_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp);
diff --git a/include/linux/slab.h b/include/linux/slab.h
index eb2bf4629157..b35e2db7eb0e 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -213,6 +213,12 @@ enum _slab_flag_bits {
#endif
/*
+ * freeptr_t represents a SLUB freelist pointer, which might be encoded
+ * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
+ */
+typedef struct { unsigned long v; } freeptr_t;
+
+/*
* ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
*
* Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
@@ -234,14 +240,173 @@ struct mem_cgroup;
*/
bool slab_is_available(void);
-struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
- unsigned int align, slab_flags_t flags,
- void (*ctor)(void *));
-struct kmem_cache *kmem_cache_create_usercopy(const char *name,
- unsigned int size, unsigned int align,
- slab_flags_t flags,
- unsigned int useroffset, unsigned int usersize,
- void (*ctor)(void *));
+/**
+ * struct kmem_cache_args - Less common arguments for kmem_cache_create()
+ *
+ * Any uninitialized fields of the structure are interpreted as unused. The
+ * exception is @freeptr_offset where %0 is a valid value, so
+ * @use_freeptr_offset must also be set to %true in order to interpret the field
+ * as used. For @useroffset %0 is also valid, but only with non-%0
+ * @usersize.
+ *
+ * When %NULL args is passed to kmem_cache_create(), it is equivalent to all
+ * fields unused.
+ */
+struct kmem_cache_args {
+ /**
+ * @align: The required alignment for the objects.
+ *
+ * %0 means no specific alignment is requested.
+ */
+ unsigned int align;
+ /**
+ * @useroffset: Usercopy region offset.
+ *
+ * %0 is a valid offset, when @usersize is non-%0
+ */
+ unsigned int useroffset;
+ /**
+ * @usersize: Usercopy region size.
+ *
+ * %0 means no usercopy region is specified.
+ */
+ unsigned int usersize;
+ /**
+ * @freeptr_offset: Custom offset for the free pointer
+ * in &SLAB_TYPESAFE_BY_RCU caches
+ *
+ * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
+ * outside of the object. This might cause the object to grow in size.
+ * Cache creators that have a reason to avoid this can specify a custom
+ * free pointer offset in their struct where the free pointer will be
+ * placed.
+ *
+ * Note that placing the free pointer inside the object requires the
+ * caller to ensure that no fields are invalidated that are required to
+ * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for
+ * details).
+ *
+ * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
+ * is specified, %use_freeptr_offset must be set %true.
+ *
+ * Note that @ctor currently isn't supported with custom free pointers
+ * as a @ctor requires an external free pointer.
+ */
+ unsigned int freeptr_offset;
+ /**
+ * @use_freeptr_offset: Whether a @freeptr_offset is used.
+ */
+ bool use_freeptr_offset;
+ /**
+ * @ctor: A constructor for the objects.
+ *
+ * The constructor is invoked for each object in a newly allocated slab
+ * page. It is the cache user's responsibility to free the object in the
+ * same state as after calling the constructor, or deal appropriately
+ * with any differences between a freshly constructed and a reallocated
+ * object.
+ *
+ * %NULL means no constructor.
+ */
+ void (*ctor)(void *);
+};
+
+struct kmem_cache *__kmem_cache_create_args(const char *name,
+ unsigned int object_size,
+ struct kmem_cache_args *args,
+ slab_flags_t flags);
+static inline struct kmem_cache *
+__kmem_cache_create(const char *name, unsigned int size, unsigned int align,
+ slab_flags_t flags, void (*ctor)(void *))
+{
+ struct kmem_cache_args kmem_args = {
+ .align = align,
+ .ctor = ctor,
+ };
+
+ return __kmem_cache_create_args(name, size, &kmem_args, flags);
+}
+
+/**
+ * kmem_cache_create_usercopy - Create a kmem cache with a region suitable
+ * for copying to userspace.
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
+ * @size: The size of objects to be created in this cache.
+ * @align: The required alignment for the objects.
+ * @flags: SLAB flags
+ * @useroffset: Usercopy region offset
+ * @usersize: Usercopy region size
+ * @ctor: A constructor for the objects, or %NULL.
+ *
+ * This is a legacy wrapper; new code should use either KMEM_CACHE_USERCOPY()
+ * if whitelisting a single field is sufficient, or kmem_cache_create() with
+ * the necessary parameters passed via the args parameter (see
+ * &struct kmem_cache_args).
+ *
+ * Return: a pointer to the cache on success, NULL on failure.
+ */
+static inline struct kmem_cache *
+kmem_cache_create_usercopy(const char *name, unsigned int size,
+ unsigned int align, slab_flags_t flags,
+ unsigned int useroffset, unsigned int usersize,
+ void (*ctor)(void *))
+{
+ struct kmem_cache_args kmem_args = {
+ .align = align,
+ .ctor = ctor,
+ .useroffset = useroffset,
+ .usersize = usersize,
+ };
+
+ return __kmem_cache_create_args(name, size, &kmem_args, flags);
+}
+
+/* If NULL is passed for @args, use this variant with default arguments. */
+static inline struct kmem_cache *
+__kmem_cache_default_args(const char *name, unsigned int size,
+ struct kmem_cache_args *args,
+ slab_flags_t flags)
+{
+ struct kmem_cache_args kmem_default_args = {};
+
+ /* Make sure we don't get passed garbage. */
+ if (WARN_ON_ONCE(args))
+ return ERR_PTR(-EINVAL);
+
+ return __kmem_cache_create_args(name, size, &kmem_default_args, flags);
+}
+
+/**
+ * kmem_cache_create - Create a kmem cache.
+ * @__name: A string which is used in /proc/slabinfo to identify this cache.
+ * @__object_size: The size of objects to be created in this cache.
+ * @__args: Optional arguments, see &struct kmem_cache_args. Passing %NULL
+ * means defaults will be used for all the arguments.
+ *
+ * This is currently implemented as a macro using ``_Generic()`` to call
+ * either the new variant of the function, or a legacy one.
+ *
+ * The new variant has 4 parameters:
+ * ``kmem_cache_create(name, object_size, args, flags)``
+ *
+ * See __kmem_cache_create_args() which implements this.
+ *
+ * The legacy variant has 5 parameters:
+ * ``kmem_cache_create(name, object_size, align, flags, ctor)``
+ *
+ * The align and ctor parameters map to the respective fields of
+ * &struct kmem_cache_args
+ *
+ * Context: Cannot be called within an interrupt, but can be interrupted.
+ *
+ * Return: a pointer to the cache on success, NULL on failure.
+ */
+#define kmem_cache_create(__name, __object_size, __args, ...) \
+ _Generic((__args), \
+ struct kmem_cache_args *: __kmem_cache_create_args, \
+ void *: __kmem_cache_default_args, \
+ default: __kmem_cache_create)(__name, __object_size, __args, __VA_ARGS__)
+
void kmem_cache_destroy(struct kmem_cache *s);
int kmem_cache_shrink(struct kmem_cache *s);
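Thanks to the _Generic() dispatch, existing five-argument callers keep compiling while new callers pass a struct kmem_cache_args (or NULL for all defaults). A hedged sketch of the new four-argument calling style:

#include <linux/init.h>
#include <linux/slab.h>

struct my_obj {
        int refcnt;
        char payload[120];
};

static struct kmem_cache *my_obj_cache;

static int __init my_obj_cache_init(void)
{
        struct kmem_cache_args args = {
                .align  = __alignof__(struct my_obj),
                /* all other fields left zero, i.e. unused */
        };

        /* New form: name, object size, args, flags. */
        my_obj_cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
                                         &args, SLAB_HWCACHE_ALIGN);
        return my_obj_cache ? 0 : -ENOMEM;
}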
@@ -253,20 +418,23 @@ int kmem_cache_shrink(struct kmem_cache *s);
* f.e. add ____cacheline_aligned_in_smp to the struct declaration
* then the objects will be properly aligned in SMP configurations.
*/
-#define KMEM_CACHE(__struct, __flags) \
- kmem_cache_create(#__struct, sizeof(struct __struct), \
- __alignof__(struct __struct), (__flags), NULL)
+#define KMEM_CACHE(__struct, __flags) \
+ __kmem_cache_create_args(#__struct, sizeof(struct __struct), \
+ &(struct kmem_cache_args) { \
+ .align = __alignof__(struct __struct), \
+ }, (__flags))
/*
* To whitelist a single field for copying to/from usercopy, use this
* macro instead for KMEM_CACHE() above.
*/
-#define KMEM_CACHE_USERCOPY(__struct, __flags, __field) \
- kmem_cache_create_usercopy(#__struct, \
- sizeof(struct __struct), \
- __alignof__(struct __struct), (__flags), \
- offsetof(struct __struct, __field), \
- sizeof_field(struct __struct, __field), NULL)
+#define KMEM_CACHE_USERCOPY(__struct, __flags, __field) \
+ __kmem_cache_create_args(#__struct, sizeof(struct __struct), \
+ &(struct kmem_cache_args) { \
+ .align = __alignof__(struct __struct), \
+ .useroffset = offsetof(struct __struct, __field), \
+ .usersize = sizeof_field(struct __struct, __field), \
+ }, (__flags))
/*
* Common kmalloc functions provided by all allocators
@@ -547,6 +715,35 @@ void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
gfp_t gfpflags) __assume_slab_alignment __malloc;
#define kmem_cache_alloc_lru(...) alloc_hooks(kmem_cache_alloc_lru_noprof(__VA_ARGS__))
+/**
+ * kmem_cache_charge - memcg charge an already allocated slab memory
+ * @objp: address of the slab object to memcg charge
+ * @gfpflags: describe the allocation context
+ *
+ * kmem_cache_charge allows charging a slab object to the current memcg,
+ * primarily in cases where charging at allocation time might not be possible
+ * because the target memcg is not known (e.g. softirq context).
+ *
+ * The objp should be a pointer returned by the slab allocator functions like
+ * kmalloc (with __GFP_ACCOUNT in flags) or kmem_cache_alloc. The memcg charge
+ * behavior can be controlled through gfpflags parameter, which affects how the
+ * necessary internal metadata can be allocated. Including __GFP_NOFAIL denotes
+ * that overcharging is requested instead of failure, but is not applied for the
+ * internal metadata allocation.
+ *
+ * There are several cases where it will return true even if the charging was
+ * not done. More specifically:
+ *
+ * 1. For !CONFIG_MEMCG or cgroup_disable=memory systems.
+ * 2. Already charged slab objects.
+ * 3. For slab objects from KMALLOC_NORMAL caches - allocated by kmalloc()
+ * without __GFP_ACCOUNT
+ * 4. Allocating internal metadata has failed
+ *
+ * Return: true if the charge was successful, otherwise false.
+ */
+bool kmem_cache_charge(void *objp, gfp_t gfpflags);
void kmem_cache_free(struct kmem_cache *s, void *objp);
kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
@@ -733,6 +930,16 @@ static inline __alloc_size(1, 2) void *kmalloc_array_noprof(size_t n, size_t siz
* @new_n: new number of elements to alloc
* @new_size: new size of a single member of the array
* @flags: the type of memory to allocate (see kmalloc)
+ *
+ * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
+ * initial memory allocation, every subsequent call to this API for the same
+ * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
+ * __GFP_ZERO is not fully honored by this API.
+ *
+ * See krealloc_noprof() for further details.
+ *
+ * In any case, the contents of the object pointed to are preserved up to the
+ * lesser of the new and old sizes.
*/
static inline __realloc_size(2, 3) void * __must_check krealloc_array_noprof(void *p,
size_t new_n,
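A small sketch of the rule above, assuming a caller-defined u32 array that should stay zero-initialized as it grows:

        u32 *buf, *tmp;

        buf = kmalloc_array(8, sizeof(*buf), GFP_KERNEL | __GFP_ZERO);
        if (!buf)
                return -ENOMEM;

        /* every resize of the same allocation must also pass __GFP_ZERO */
        tmp = krealloc_array(buf, 16, sizeof(*buf), GFP_KERNEL | __GFP_ZERO);
        if (!tmp) {
                kfree(buf);
                return -ENOMEM;
        }
        buf = tmp;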
@@ -841,8 +1048,8 @@ kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
#define kvcalloc_node(...) alloc_hooks(kvcalloc_node_noprof(__VA_ARGS__))
#define kvcalloc(...) alloc_hooks(kvcalloc_noprof(__VA_ARGS__))
-extern void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
- __realloc_size(3);
+void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
+ __realloc_size(2);
#define kvrealloc(...) alloc_hooks(kvrealloc_noprof(__VA_ARGS__))
extern void kvfree(const void *addr);
diff --git a/include/linux/smp.h b/include/linux/smp.h
index fcd61dfe2af3..f1aa0952e8c3 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -109,7 +109,7 @@ static inline void on_each_cpu_cond(smp_cond_func_t cond_func,
* Architecture specific boot CPU setup. Defined as empty weak function in
* init/main.c. Architectures can override it.
*/
-void smp_prepare_boot_cpu(void);
+void __init smp_prepare_boot_cpu(void);
#ifdef CONFIG_SMP
@@ -294,4 +294,10 @@ int smpcfd_prepare_cpu(unsigned int cpu);
int smpcfd_dead_cpu(unsigned int cpu);
int smpcfd_dying_cpu(unsigned int cpu);
+#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
+bool csd_lock_is_stuck(void);
+#else
+static inline bool csd_lock_is_stuck(void) { return false; }
+#endif
+
#endif /* __LINUX_SMP_H */
diff --git a/include/linux/soc/cirrus/ep93xx.h b/include/linux/soc/cirrus/ep93xx.h
index 56fbe2dc59b1..3e6cf2b25a97 100644
--- a/include/linux/soc/cirrus/ep93xx.h
+++ b/include/linux/soc/cirrus/ep93xx.h
@@ -2,7 +2,18 @@
#ifndef _SOC_EP93XX_H
#define _SOC_EP93XX_H
-struct platform_device;
+struct regmap;
+struct spinlock_t;
+
+enum ep93xx_soc_model {
+ EP93XX_9301_SOC,
+ EP93XX_9307_SOC,
+ EP93XX_9312_SOC,
+};
+
+#include <linux/auxiliary_bus.h>
+#include <linux/compiler_types.h>
+#include <linux/container_of.h>
#define EP93XX_CHIP_REV_D0 3
#define EP93XX_CHIP_REV_D1 4
@@ -10,28 +21,18 @@ struct platform_device;
#define EP93XX_CHIP_REV_E1 6
#define EP93XX_CHIP_REV_E2 7
-#ifdef CONFIG_ARCH_EP93XX
-int ep93xx_pwm_acquire_gpio(struct platform_device *pdev);
-void ep93xx_pwm_release_gpio(struct platform_device *pdev);
-int ep93xx_ide_acquire_gpio(struct platform_device *pdev);
-void ep93xx_ide_release_gpio(struct platform_device *pdev);
-int ep93xx_keypad_acquire_gpio(struct platform_device *pdev);
-void ep93xx_keypad_release_gpio(struct platform_device *pdev);
-int ep93xx_i2s_acquire(void);
-void ep93xx_i2s_release(void);
-unsigned int ep93xx_chip_revision(void);
+struct ep93xx_regmap_adev {
+ struct auxiliary_device adev;
+ struct regmap *map;
+ void __iomem *base;
+ spinlock_t *lock;
+ void (*write)(struct regmap *map, spinlock_t *lock, unsigned int reg,
+ unsigned int val);
+ void (*update_bits)(struct regmap *map, spinlock_t *lock,
+ unsigned int reg, unsigned int mask, unsigned int val);
+};
-#else
-static inline int ep93xx_pwm_acquire_gpio(struct platform_device *pdev) { return 0; }
-static inline void ep93xx_pwm_release_gpio(struct platform_device *pdev) {}
-static inline int ep93xx_ide_acquire_gpio(struct platform_device *pdev) { return 0; }
-static inline void ep93xx_ide_release_gpio(struct platform_device *pdev) {}
-static inline int ep93xx_keypad_acquire_gpio(struct platform_device *pdev) { return 0; }
-static inline void ep93xx_keypad_release_gpio(struct platform_device *pdev) {}
-static inline int ep93xx_i2s_acquire(void) { return 0; }
-static inline void ep93xx_i2s_release(void) {}
-static inline unsigned int ep93xx_chip_revision(void) { return 0; }
-
-#endif
+#define to_ep93xx_regmap_adev(_adev) \
+ container_of((_adev), struct ep93xx_regmap_adev, adev)
#endif
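A hedged sketch of how an auxiliary driver on this bus might reach the shared syscon resources through the parent-provided callbacks; the driver name, register offset and value are illustrative only:

static int ep93xx_foo_probe(struct auxiliary_device *adev,
                            const struct auxiliary_device_id *id)
{
        struct ep93xx_regmap_adev *rdev = to_ep93xx_regmap_adev(adev);

        /* locked write through the helper supplied by the parent device */
        rdev->write(rdev->map, rdev->lock, 0x80, 0x1);
        return 0;
}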
diff --git a/include/linux/soc/qcom/geni-se.h b/include/linux/soc/qcom/geni-se.h
index 0f038a1a0330..c3bca9c0bf2c 100644
--- a/include/linux/soc/qcom/geni-se.h
+++ b/include/linux/soc/qcom/geni-se.h
@@ -88,11 +88,15 @@ struct geni_se {
#define SE_GENI_M_IRQ_STATUS 0x610
#define SE_GENI_M_IRQ_EN 0x614
#define SE_GENI_M_IRQ_CLEAR 0x618
+#define SE_GENI_M_IRQ_EN_SET 0x61c
+#define SE_GENI_M_IRQ_EN_CLEAR 0x620
#define SE_GENI_S_CMD0 0x630
#define SE_GENI_S_CMD_CTRL_REG 0x634
#define SE_GENI_S_IRQ_STATUS 0x640
#define SE_GENI_S_IRQ_EN 0x644
#define SE_GENI_S_IRQ_CLEAR 0x648
+#define SE_GENI_S_IRQ_EN_SET 0x64c
+#define SE_GENI_S_IRQ_EN_CLEAR 0x650
#define SE_GENI_TX_FIFOn 0x700
#define SE_GENI_RX_FIFOn 0x780
#define SE_GENI_TX_FIFO_STATUS 0x800
@@ -101,6 +105,8 @@ struct geni_se {
#define SE_GENI_RX_WATERMARK_REG 0x810
#define SE_GENI_RX_RFR_WATERMARK_REG 0x814
#define SE_GENI_IOS 0x908
+#define SE_GENI_M_GP_LENGTH 0x910
+#define SE_GENI_S_GP_LENGTH 0x914
#define SE_DMA_TX_IRQ_STAT 0xc40
#define SE_DMA_TX_IRQ_CLR 0xc44
#define SE_DMA_TX_FSM_RST 0xc58
@@ -234,6 +240,9 @@ struct geni_se {
#define IO2_DATA_IN BIT(1)
#define RX_DATA_IN BIT(0)
+/* SE_GENI_M_GP_LENGTH and SE_GENI_S_GP_LENGTH fields */
+#define GP_LENGTH GENMASK(31, 0)
+
/* SE_DMA_TX_IRQ_STAT Register fields */
#define TX_DMA_DONE BIT(0)
#define TX_EOT BIT(1)
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index 94fc1b57c57b..5e0dd47a0412 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -704,8 +704,6 @@ struct sdw_master_device {
container_of(d, struct sdw_master_device, dev)
struct sdw_driver {
- const char *name;
-
int (*probe)(struct sdw_slave *sdw,
const struct sdw_device_id *id);
int (*remove)(struct sdw_slave *sdw);
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h
index d537587b4499..37ae69365fe2 100644
--- a/include/linux/soundwire/sdw_intel.h
+++ b/include/linux/soundwire/sdw_intel.h
@@ -388,6 +388,7 @@ struct sdw_intel;
/* struct intel_sdw_hw_ops - SoundWire ops for Intel platforms.
* @debugfs_init: initialize all debugfs capabilities
* @debugfs_exit: close and cleanup debugfs capabilities
+ * @get_link_count: fetch link count from hardware registers
* @register_dai: read all PDI information and register DAIs
* @check_clock_stop: throw error message if clock is not stopped.
* @start_bus: normal start
@@ -412,6 +413,8 @@ struct sdw_intel_hw_ops {
void (*debugfs_init)(struct sdw_intel *sdw);
void (*debugfs_exit)(struct sdw_intel *sdw);
+ int (*get_link_count)(struct sdw_intel *sdw);
+
int (*register_dai)(struct sdw_intel *sdw);
void (*check_clock_stop)(struct sdw_intel *sdw);
@@ -447,4 +450,9 @@ extern const struct sdw_intel_hw_ops sdw_intel_lnl_hw_ops;
#define SDW_INTEL_DEV_NUM_IDA_MIN 6
+/*
+ * Max number of links supported in hardware
+ */
+#define SDW_INTEL_MAX_LINKS 5
+
#endif
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index d47d5f14ff99..4b95663163e0 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -498,7 +498,6 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* controller has native support for memory like operations.
* @mem_caps: controller capabilities for the handling of memory operations.
* @unprepare_message: undo any work done by prepare_message().
- * @slave_abort: abort the ongoing transfer request on an SPI slave controller
* @target_abort: abort the ongoing transfer request on an SPI target controller
* @cs_gpiods: Array of GPIO descriptors to use as chip select lines; one per CS
* number. Any individual value may be NULL for CS lines that
@@ -725,10 +724,7 @@ struct spi_controller {
struct spi_message *message);
int (*unprepare_message)(struct spi_controller *ctlr,
struct spi_message *message);
- union {
- int (*slave_abort)(struct spi_controller *ctlr);
- int (*target_abort)(struct spi_controller *ctlr);
- };
+ int (*target_abort)(struct spi_controller *ctlr);
/*
* These hooks are for drivers that use a generic implementation
@@ -802,11 +798,6 @@ static inline void spi_controller_put(struct spi_controller *ctlr)
put_device(&ctlr->dev);
}
-static inline bool spi_controller_is_slave(struct spi_controller *ctlr)
-{
- return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->slave;
-}
-
static inline bool spi_controller_is_target(struct spi_controller *ctlr)
{
return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->target;
@@ -1296,7 +1287,6 @@ extern int devm_spi_optimize_message(struct device *dev, struct spi_device *spi,
extern int spi_setup(struct spi_device *spi);
extern int spi_async(struct spi_device *spi, struct spi_message *message);
-extern int spi_slave_abort(struct spi_device *spi);
extern int spi_target_abort(struct spi_device *spi);
static inline size_t
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h
index d4cb83195f7a..c92cd43a47f4 100644
--- a/include/linux/spi/spi_bitbang.h
+++ b/include/linux/spi/spi_bitbang.h
@@ -24,6 +24,7 @@ struct spi_bitbang {
#define BITBANG_CS_ACTIVE 1 /* normally nCS, active low */
#define BITBANG_CS_INACTIVE 0
+ void (*set_mosi_idle)(struct spi_device *spi);
/* txrx_bufs() may handle dma mapping for transfers that don't
* already have one (transfer.{tx,rx}_dma is zero), or use PIO
*/
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 8f3f72480e78..ed57598394de 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -129,10 +129,23 @@ struct srcu_struct {
#define SRCU_STATE_SCAN1 1
#define SRCU_STATE_SCAN2 2
+/*
+ * Values for initializing gp sequence fields. Higher values allow wraparounds to
+ * occur earlier.
+ * The second value with state is useful for static initialization of
+ * srcu_usage, where srcu_gp_seq_needed is expected to have some state value in its
+ * lower bits (or else it will appear to be already initialized within
+ * the call to check_init_srcu_struct()).
+ */
+#define SRCU_GP_SEQ_INITIAL_VAL ((0UL - 100UL) << RCU_SEQ_CTR_SHIFT)
+#define SRCU_GP_SEQ_INITIAL_VAL_WITH_STATE (SRCU_GP_SEQ_INITIAL_VAL - 1)
+
#define __SRCU_USAGE_INIT(name) \
{ \
.lock = __SPIN_LOCK_UNLOCKED(name.lock), \
- .srcu_gp_seq_needed = -1UL, \
+ .srcu_gp_seq = SRCU_GP_SEQ_INITIAL_VAL, \
+ .srcu_gp_seq_needed = SRCU_GP_SEQ_INITIAL_VAL_WITH_STATE, \
+ .srcu_gp_seq_needed_exp = SRCU_GP_SEQ_INITIAL_VAL, \
.work = __DELAYED_WORK_INITIALIZER(name.work, NULL, 0), \
}
diff --git a/include/linux/string.h b/include/linux/string.h
index 9edace076ddb..0dd27afcfaf7 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -76,12 +76,16 @@ ssize_t sized_strscpy(char *, const char *, size_t);
* known size.
*/
#define __strscpy0(dst, src, ...) \
- sized_strscpy(dst, src, sizeof(dst) + __must_be_array(dst))
-#define __strscpy1(dst, src, size) sized_strscpy(dst, src, size)
+ sized_strscpy(dst, src, sizeof(dst) + __must_be_array(dst) + \
+ __must_be_cstr(dst) + __must_be_cstr(src))
+#define __strscpy1(dst, src, size) \
+ sized_strscpy(dst, src, size + __must_be_cstr(dst) + __must_be_cstr(src))
#define __strscpy_pad0(dst, src, ...) \
- sized_strscpy_pad(dst, src, sizeof(dst) + __must_be_array(dst))
-#define __strscpy_pad1(dst, src, size) sized_strscpy_pad(dst, src, size)
+ sized_strscpy_pad(dst, src, sizeof(dst) + __must_be_array(dst) + \
+ __must_be_cstr(dst) + __must_be_cstr(src))
+#define __strscpy_pad1(dst, src, size) \
+ sized_strscpy_pad(dst, src, size + __must_be_cstr(dst) + __must_be_cstr(src))
/**
* strscpy - Copy a C-string into a sized buffer
@@ -279,6 +283,18 @@ static inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
void *memchr_inv(const void *s, int c, size_t n);
char *strreplace(char *str, char old, char new);
+/**
+ * mem_is_zero - Check if an area of memory is all 0's.
+ * @s: The memory area
+ * @n: The size of the area
+ *
+ * Return: True if the area of memory is all 0's.
+ */
+static inline bool mem_is_zero(const void *s, size_t n)
+{
+ return !memchr_inv(s, 0, n);
+}
+
extern void kfree_const(const void *x);
extern char *kstrdup(const char *s, gfp_t gfp) __malloc;
diff --git a/include/linux/string_choices.h b/include/linux/string_choices.h
index d9ebe20229f8..120ca0f28e95 100644
--- a/include/linux/string_choices.h
+++ b/include/linux/string_choices.h
@@ -2,17 +2,32 @@
#ifndef _LINUX_STRING_CHOICES_H_
#define _LINUX_STRING_CHOICES_H_
+/*
+ * This header provides a series of helpers in the str_$TRUE_$FALSE format
+ * (more helpers can be added as needed), where $TRUE and $FALSE are their
+ * corresponding literal strings. These helpers can be used in printing
+ * and in other places where constant strings are required. Using these
+ * helpers offers the following benefits:
+ * 1) Reducing the hardcoding of strings, which makes the code more elegant
+ * through these simple literal-meaning helpers.
+ * 2) Unifying the output, which prevents the same string from being printed
+ * in various forms, such as enable/disable, enabled/disabled, en/dis.
+ * 3) Deduping by the linker, which results in a smaller binary file.
+ */
+
#include <linux/types.h>
static inline const char *str_enable_disable(bool v)
{
return v ? "enable" : "disable";
}
+#define str_disable_enable(v) str_enable_disable(!(v))
static inline const char *str_enabled_disabled(bool v)
{
return v ? "enabled" : "disabled";
}
+#define str_disabled_enabled(v) str_enabled_disabled(!(v))
static inline const char *str_hi_lo(bool v)
{
@@ -36,11 +51,25 @@ static inline const char *str_on_off(bool v)
{
return v ? "on" : "off";
}
+#define str_off_on(v) str_on_off(!(v))
static inline const char *str_yes_no(bool v)
{
return v ? "yes" : "no";
}
+#define str_no_yes(v) str_yes_no(!(v))
+
+static inline const char *str_up_down(bool v)
+{
+ return v ? "up" : "down";
+}
+#define str_down_up(v) str_up_down(!(v))
+
+static inline const char *str_true_false(bool v)
+{
+ return v ? "true" : "false";
+}
+#define str_false_true(v) str_true_false(!(v))
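For illustration (the device pointer, variable names and message are made up), the helpers keep log output consistent:

        dev_info(dev, "link %s, offload %s\n",
                 str_up_down(link_up), str_enabled_disabled(offload));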
/**
* str_plural - Return the simple pluralization based on English counts
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 0c77ba488bba..fec1e8a1570c 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -151,13 +151,15 @@ struct rpc_task_setup {
#define RPC_WAS_SENT(t) ((t)->tk_flags & RPC_TASK_SENT)
#define RPC_IS_MOVEABLE(t) ((t)->tk_flags & RPC_TASK_MOVEABLE)
-#define RPC_TASK_RUNNING 0
-#define RPC_TASK_QUEUED 1
-#define RPC_TASK_ACTIVE 2
-#define RPC_TASK_NEED_XMIT 3
-#define RPC_TASK_NEED_RECV 4
-#define RPC_TASK_MSG_PIN_WAIT 5
-#define RPC_TASK_SIGNALLED 6
+enum {
+ RPC_TASK_RUNNING,
+ RPC_TASK_QUEUED,
+ RPC_TASK_ACTIVE,
+ RPC_TASK_NEED_XMIT,
+ RPC_TASK_NEED_RECV,
+ RPC_TASK_MSG_PIN_WAIT,
+ RPC_TASK_SIGNALLED,
+};
#define rpc_test_and_set_running(t) \
test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index a7d0406b9ef5..e68fecf6eab5 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -21,6 +21,7 @@
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
+#include <linux/kthread.h>
/*
*
@@ -33,9 +34,9 @@
* node traffic on multi-node NUMA NFS servers.
*/
struct svc_pool {
- unsigned int sp_id; /* pool id; also node id on NUMA */
+ unsigned int sp_id; /* pool id; also node id on NUMA */
struct lwq sp_xprts; /* pending transports */
- atomic_t sp_nrthreads; /* # of threads in pool */
+ unsigned int sp_nrthreads; /* # of threads in pool */
struct list_head sp_all_threads; /* all server threads */
struct llist_head sp_idle_threads; /* idle server threads */
@@ -66,9 +67,10 @@ enum {
* We currently do not support more than one RPC program per daemon.
*/
struct svc_serv {
- struct svc_program * sv_program; /* RPC program */
+ struct svc_program * sv_programs; /* RPC programs */
struct svc_stat * sv_stats; /* RPC statistics */
spinlock_t sv_lock;
+ unsigned int sv_nprogs; /* Number of sv_programs */
unsigned int sv_nrthreads; /* # of server threads */
unsigned int sv_maxconn; /* max connections allowed or
* '0' causing max to be based
@@ -232,6 +234,11 @@ struct svc_rqst {
struct net *rq_bc_net; /* pointer to backchannel's
* net namespace
*/
+
+ int rq_err; /* Thread sets this to indicate
+ * initialisation success.
+ */
+
unsigned long bc_to_initval;
unsigned int bc_to_retries;
void ** rq_lease_breaker; /* The v4 client breaking a lease */
@@ -305,6 +312,31 @@ static inline bool svc_thread_should_stop(struct svc_rqst *rqstp)
return test_bit(RQ_VICTIM, &rqstp->rq_flags);
}
+/**
+ * svc_thread_init_status - report whether thread has initialised successfully
+ * @rqstp: the thread in question
+ * @err: errno code
+ *
+ * After performing any initialisation that could fail, and before starting
+ * normal work, each sunrpc svc_thread must call svc_thread_init_status()
+ * with an appropriate error, or zero.
+ *
+ * If zero is passed, the thread is ready and must continue until
+ * svc_thread_should_stop() returns true. If a non-zero error is passed,
+ * the call will not return - the thread will exit.
+ */
+static inline void svc_thread_init_status(struct svc_rqst *rqstp, int err)
+{
+ rqstp->rq_err = err;
+ /* memory barrier ensures assignment to error above is visible before
+ * waitqueue_active() test below completes.
+ */
+ smp_mb();
+ wake_up_var(&rqstp->rq_err);
+ if (err)
+ kthread_exit(1);
+}
+
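A hedged sketch of a service thread function following this protocol; my_svc_thread and my_thread_setup are hypothetical names, the surrounding calls are the documented sunrpc ones:

static int my_svc_thread(void *data)
{
        struct svc_rqst *rqstp = data;
        int err;

        err = my_thread_setup(rqstp);           /* hypothetical per-thread setup */
        svc_thread_init_status(rqstp, err);     /* does not return on error */

        while (!svc_thread_should_stop(rqstp))
                svc_recv(rqstp);

        svc_exit_thread(rqstp);
        return 0;
}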
struct svc_deferred_req {
u32 prot; /* protocol (UDP or TCP) */
struct svc_xprt *xprt;
@@ -329,10 +361,9 @@ struct svc_process_info {
};
/*
- * List of RPC programs on the same transport endpoint
+ * RPC program - an array of these can use the same transport endpoint
*/
struct svc_program {
- struct svc_program * pg_next; /* other programs (same xprt) */
u32 pg_prog; /* program number */
unsigned int pg_lovers; /* lowest version */
unsigned int pg_hivers; /* highest version */
@@ -401,19 +432,16 @@ struct svc_procedure {
*/
int sunrpc_set_pool_mode(const char *val);
int sunrpc_get_pool_mode(char *val, size_t size);
-int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
int svc_bind(struct svc_serv *serv, struct net *net);
struct svc_serv *svc_create(struct svc_program *, unsigned int,
int (*threadfn)(void *data));
-struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
- struct svc_pool *pool, int node);
bool svc_rqst_replace_page(struct svc_rqst *rqstp,
struct page *page);
void svc_rqst_release_pages(struct svc_rqst *rqstp);
-void svc_rqst_free(struct svc_rqst *);
void svc_exit_thread(struct svc_rqst *);
struct svc_serv * svc_create_pooled(struct svc_program *prog,
+ unsigned int nprog,
struct svc_stat *stats,
unsigned int bufsize,
int (*threadfn)(void *data));
@@ -446,11 +474,6 @@ int svc_generic_rpcbind_set(struct net *net,
u32 version, int family,
unsigned short proto,
unsigned short port);
-int svc_rpcbind_set_version(struct net *net,
- const struct svc_program *progp,
- u32 version, int family,
- unsigned short proto,
- unsigned short port);
#define RPC_MAX_ADDRBUFLEN (63U)
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index d33bab33099a..619fc0bd837a 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -48,6 +48,7 @@
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/rpc_rdma_cid.h>
#include <linux/sunrpc/svc_rdma_pcl.h>
+#include <linux/sunrpc/rdma_rn.h>
#include <linux/percpu_counter.h>
#include <rdma/ib_verbs.h>
@@ -76,6 +77,7 @@ struct svcxprt_rdma {
struct svc_xprt sc_xprt; /* SVC transport structure */
struct rdma_cm_id *sc_cm_id; /* RDMA connection id */
struct list_head sc_accept_q; /* Conn. waiting accept */
+ struct rpcrdma_notification sc_rn; /* removal notification */
int sc_ord; /* RDMA read limit */
int sc_max_send_sges;
bool sc_snd_w_inv; /* OK to use Send With Invalidate */
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index 61c455f1e1f5..2e111153f7cd 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -14,6 +14,7 @@
#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/gss_api.h>
+#include <linux/sunrpc/clnt.h>
#include <linux/hash.h>
#include <linux/stringhash.h>
#include <linux/cred.h>
@@ -151,13 +152,16 @@ struct auth_ops {
struct svc_xprt;
-extern enum svc_auth_status svc_authenticate(struct svc_rqst *rqstp);
extern rpc_authflavor_t svc_auth_flavor(struct svc_rqst *rqstp);
extern int svc_authorise(struct svc_rqst *rqstp);
extern enum svc_auth_status svc_set_client(struct svc_rqst *rqstp);
extern int svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops);
extern void svc_auth_unregister(rpc_authflavor_t flavor);
+extern void svcauth_map_clnt_to_svc_cred_local(struct rpc_clnt *clnt,
+ const struct cred *,
+ struct svc_cred *);
+
extern struct auth_domain *unix_domain_find(char *name);
extern void auth_domain_put(struct auth_domain *item);
extern struct auth_domain *auth_domain_lookup(char *name, struct auth_domain *new);
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 7c78ec6356b9..bf45d9e8492a 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -58,8 +58,6 @@ static inline u32 svc_sock_final_rec(struct svc_sock *svsk)
*/
void svc_recv(struct svc_rqst *rqstp);
void svc_send(struct svc_rqst *rqstp);
-void svc_drop(struct svc_rqst *);
-void svc_sock_update_bufs(struct svc_serv *serv);
int svc_addsock(struct svc_serv *serv, struct net *net,
const int fd, char *name_return, const size_t len,
const struct cred *cred);
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 2f8dc47f1eb0..5f775e104f9a 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -13,7 +13,7 @@
#include <linux/uio.h>
#include <asm/byteorder.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/scatterlist.h>
struct bio_vec;
diff --git a/include/linux/sunrpc/xdrgen/_builtins.h b/include/linux/sunrpc/xdrgen/_builtins.h
new file mode 100644
index 000000000000..66ca3ece951a
--- /dev/null
+++ b/include/linux/sunrpc/xdrgen/_builtins.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Oracle and/or its affiliates.
+ *
+ * This header defines XDR data type primitives specified in
+ * Section 4 of RFC 4506, used by RPC programs implemented
+ * in the Linux kernel.
+ */
+
+#ifndef _SUNRPC_XDRGEN__BUILTINS_H_
+#define _SUNRPC_XDRGEN__BUILTINS_H_
+
+#include <linux/sunrpc/xdr.h>
+
+static inline bool
+xdrgen_decode_void(struct xdr_stream *xdr)
+{
+ return true;
+}
+
+static inline bool
+xdrgen_encode_void(struct xdr_stream *xdr)
+{
+ return true;
+}
+
+static inline bool
+xdrgen_decode_bool(struct xdr_stream *xdr, bool *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = (*p != xdr_zero);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_bool(struct xdr_stream *xdr, bool val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = val ? xdr_one : xdr_zero;
+ return true;
+}
+
+static inline bool
+xdrgen_decode_int(struct xdr_stream *xdr, s32 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = be32_to_cpup(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_int(struct xdr_stream *xdr, s32 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = cpu_to_be32(val);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_unsigned_int(struct xdr_stream *xdr, u32 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = be32_to_cpup(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_unsigned_int(struct xdr_stream *xdr, u32 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = cpu_to_be32(val);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_long(struct xdr_stream *xdr, s32 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = be32_to_cpup(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_long(struct xdr_stream *xdr, s32 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = cpu_to_be32(val);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_unsigned_long(struct xdr_stream *xdr, u32 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = be32_to_cpup(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_unsigned_long(struct xdr_stream *xdr, u32 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = cpu_to_be32(val);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_hyper(struct xdr_stream *xdr, s64 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT * 2);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = get_unaligned_be64(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_hyper(struct xdr_stream *xdr, s64 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT * 2);
+
+ if (unlikely(!p))
+ return false;
+ put_unaligned_be64(val, p);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_unsigned_hyper(struct xdr_stream *xdr, u64 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT * 2);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = get_unaligned_be64(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_unsigned_hyper(struct xdr_stream *xdr, u64 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT * 2);
+
+ if (unlikely(!p))
+ return false;
+ put_unaligned_be64(val, p);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_string(struct xdr_stream *xdr, string *ptr, u32 maxlen)
+{
+ __be32 *p;
+ u32 len;
+
+ if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0))
+ return false;
+ if (unlikely(maxlen && len > maxlen))
+ return false;
+ if (len != 0) {
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ return false;
+ ptr->data = (unsigned char *)p;
+ }
+ ptr->len = len;
+ return true;
+}
+
+static inline bool
+xdrgen_encode_string(struct xdr_stream *xdr, string val, u32 maxlen)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT + xdr_align_size(val.len));
+
+ if (unlikely(!p))
+ return false;
+ xdr_encode_opaque(p, val.data, val.len);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_opaque(struct xdr_stream *xdr, opaque *ptr, u32 maxlen)
+{
+ __be32 *p;
+ u32 len;
+
+ if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0))
+ return false;
+ if (unlikely(maxlen && len > maxlen))
+ return false;
+ if (len != 0) {
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ return false;
+ ptr->data = (u8 *)p;
+ }
+ ptr->len = len;
+ return true;
+}
+
+static inline bool
+xdrgen_encode_opaque(struct xdr_stream *xdr, opaque val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT + xdr_align_size(val.len));
+
+ if (unlikely(!p))
+ return false;
+ xdr_encode_opaque(p, val.data, val.len);
+ return true;
+}
+
+#endif /* _SUNRPC_XDRGEN__BUILTINS_H_ */
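A minimal sketch of how generated or hand-written encoders could compose these primitives; struct foo_args and its flags/offset/data fields are assumptions for illustration only:

static bool encode_foo_args(struct xdr_stream *xdr, const struct foo_args *args)
{
        return xdrgen_encode_unsigned_int(xdr, args->flags) &&
               xdrgen_encode_hyper(xdr, args->offset) &&
               xdrgen_encode_opaque(xdr, args->data);
}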
diff --git a/include/linux/sunrpc/xdrgen/_defs.h b/include/linux/sunrpc/xdrgen/_defs.h
new file mode 100644
index 000000000000..be9e62371758
--- /dev/null
+++ b/include/linux/sunrpc/xdrgen/_defs.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Oracle and/or its affiliates.
+ *
+ * This header defines XDR data type primitives specified in
+ * Section 4 of RFC 4506, used by RPC programs implemented
+ * in the Linux kernel.
+ */
+
+#ifndef _SUNRPC_XDRGEN__DEFS_H_
+#define _SUNRPC_XDRGEN__DEFS_H_
+
+#define TRUE (true)
+#define FALSE (false)
+
+typedef struct {
+ u32 len;
+ unsigned char *data;
+} string;
+
+typedef struct {
+ u32 len;
+ u8 *data;
+} opaque;
+
+#endif /* _SUNRPC_XDRGEN__DEFS_H_ */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index ba7ea95d1c57..ca533b478c21 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -243,22 +243,24 @@ enum {
* free clusters are organized into a list. We fetch an entry from the list to
* get a free cluster.
*
- * The data field stores next cluster if the cluster is free or cluster usage
- * counter otherwise. The flags field determines if a cluster is free. This is
- * protected by swap_info_struct.lock.
+ * The flags field determines if a cluster is free. This is
+ * protected by the cluster lock.
*/
struct swap_cluster_info {
spinlock_t lock; /*
* Protect swap_cluster_info fields
- * and swap_info_struct->swap_map
- * elements correspond to the swap
- * cluster
+ * other than list, and swap_info_struct->swap_map
+ * elements corresponding to the swap cluster.
*/
- unsigned int data:24;
- unsigned int flags:8;
+ u16 count;
+ u8 flags;
+ u8 order;
+ struct list_head list;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
-#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
+#define CLUSTER_FLAG_NONFULL 2 /* This cluster is on nonfull list */
+#define CLUSTER_FLAG_FRAG 4 /* This cluster is on frag list */
+#define CLUSTER_FLAG_FULL 8 /* This cluster is on full list */
/*
* The first page in the swap file is the swap header, which is always marked
@@ -283,11 +285,6 @@ struct percpu_cluster {
unsigned int next[SWAP_NR_ORDERS]; /* Likely next allocation offset */
};
-struct swap_cluster_list {
- struct swap_cluster_info head;
- struct swap_cluster_info tail;
-};
-
/*
* The in-memory structure used to track swap areas.
*/
@@ -299,8 +296,15 @@ struct swap_info_struct {
signed char type; /* strange name for an index */
unsigned int max; /* extent of the swap_map */
unsigned char *swap_map; /* vmalloc'ed array of usage counts */
+ unsigned long *zeromap; /* kvmalloc'ed bitmap to track zero pages */
struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
- struct swap_cluster_list free_clusters; /* free clusters list */
+ struct list_head free_clusters; /* free clusters list */
+ struct list_head full_clusters; /* full clusters list */
+ struct list_head nonfull_clusters[SWAP_NR_ORDERS];
+ /* list of clusters that contain at least one free slot */
+ struct list_head frag_clusters[SWAP_NR_ORDERS];
+ /* list of clusters that are fragmented or contended */
+ unsigned int frag_cluster_nr[SWAP_NR_ORDERS];
unsigned int lowest_bit; /* index of first free in swap_map */
unsigned int highest_bit; /* index of last free in swap_map */
unsigned int pages; /* total of usable pages of swap */
@@ -331,7 +335,7 @@ struct swap_info_struct {
* list.
*/
struct work_struct discard_work; /* discard worker */
- struct swap_cluster_list discard_clusters; /* discard clusters list */
+ struct list_head discard_clusters; /* discard clusters list */
struct plist_node avail_lists[]; /*
* entries in swap_avail_heads, one
* entry per node.
@@ -478,9 +482,9 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int order);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
-extern void swap_shmem_alloc(swp_entry_t);
+extern void swap_shmem_alloc(swp_entry_t, int);
extern int swap_duplicate(swp_entry_t);
-extern int swapcache_prepare(swp_entry_t);
+extern int swapcache_prepare(swp_entry_t entry, int nr);
extern void swap_free_nr(swp_entry_t entry, int nr_pages);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern void free_swap_and_cache_nr(swp_entry_t entry, int nr);
@@ -545,7 +549,7 @@ static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
return 0;
}
-static inline void swap_shmem_alloc(swp_entry_t swp)
+static inline void swap_shmem_alloc(swp_entry_t swp, int nr)
{
}
@@ -554,7 +558,7 @@ static inline int swap_duplicate(swp_entry_t swp)
return 0;
}
-static inline int swapcache_prepare(swp_entry_t swp)
+static inline int swapcache_prepare(swp_entry_t swp, int nr)
{
return 0;
}
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 4bcf6754738d..5758104921e6 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -870,7 +870,7 @@ asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags,
#endif
asmlinkage long sys_name_to_handle_at(int dfd, const char __user *name,
struct file_handle __user *handle,
- int __user *mnt_id, int flag);
+ void __user *mnt_id, int flag);
asmlinkage long sys_open_by_handle_at(int mountdirfd,
struct file_handle __user *handle,
int flags);
diff --git a/include/linux/tee_core.h b/include/linux/tee_core.h
index efd16ed52315..a38494d6b5f4 100644
--- a/include/linux/tee_core.h
+++ b/include/linux/tee_core.h
@@ -155,6 +155,18 @@ int tee_device_register(struct tee_device *teedev);
void tee_device_unregister(struct tee_device *teedev);
/**
+ * tee_device_set_dev_groups() - Set device attribute groups
+ * @teedev: Device to register
+ * @dev_groups: Attribute groups
+ *
+ * Assigns the provided @dev_groups to the @teedev to be registered later
+ * with tee_device_register(). Calling this function is optional, but if
+ * it's called, it must be called before tee_device_register().
+ */
+void tee_device_set_dev_groups(struct tee_device *teedev,
+ const struct attribute_group **dev_groups);
+
+/**
* tee_session_calc_client_uuid() - Calculates client UUID for session
* @uuid: Resulting UUID
* @connection_method: Connection method for session (TEE_IOCTL_LOGIN_*)
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index b86ddca46b9e..25ea8fe2313e 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -85,11 +85,17 @@ struct thermal_trip {
struct thermal_zone_device;
+struct cooling_spec {
+ unsigned long upper; /* Highest cooling state */
+ unsigned long lower; /* Lowest cooling state */
+ unsigned int weight; /* Cooling device weight */
+};
+
struct thermal_zone_device_ops {
- int (*bind) (struct thermal_zone_device *,
- struct thermal_cooling_device *);
- int (*unbind) (struct thermal_zone_device *,
- struct thermal_cooling_device *);
+ bool (*should_bind) (struct thermal_zone_device *,
+ const struct thermal_trip *,
+ struct thermal_cooling_device *,
+ struct cooling_spec *);
int (*get_temp) (struct thermal_zone_device *, int *);
int (*set_trips) (struct thermal_zone_device *, int, int);
int (*change_mode) (struct thermal_zone_device *,
@@ -203,15 +209,12 @@ static inline void devm_thermal_of_zone_unregister(struct device *dev,
}
#endif
-int thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
- struct thermal_trip *trip);
int for_each_thermal_trip(struct thermal_zone_device *tz,
int (*cb)(struct thermal_trip *, void *),
void *data);
int thermal_zone_for_each_trip(struct thermal_zone_device *tz,
int (*cb)(struct thermal_trip *, void *),
void *data);
-int thermal_zone_get_num_trips(struct thermal_zone_device *tz);
void thermal_zone_set_trip_temp(struct thermal_zone_device *tz,
struct thermal_trip *trip, int temp);
@@ -240,20 +243,6 @@ const char *thermal_zone_device_type(struct thermal_zone_device *tzd);
int thermal_zone_device_id(struct thermal_zone_device *tzd);
struct device *thermal_zone_device(struct thermal_zone_device *tzd);
-int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz,
- const struct thermal_trip *trip,
- struct thermal_cooling_device *cdev,
- unsigned long upper, unsigned long lower,
- unsigned int weight);
-int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int,
- struct thermal_cooling_device *,
- unsigned long, unsigned long,
- unsigned int);
-int thermal_unbind_cdev_from_trip(struct thermal_zone_device *tz,
- const struct thermal_trip *trip,
- struct thermal_cooling_device *cdev);
-int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int,
- struct thermal_cooling_device *);
void thermal_zone_device_update(struct thermal_zone_device *,
enum thermal_notify_event);
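A hedged sketch of the consolidated callback that replaces the bind/unbind pair; the passive-trip policy shown is purely illustrative and foo_should_bind is a made-up name:

static bool foo_should_bind(struct thermal_zone_device *tz,
                            const struct thermal_trip *trip,
                            struct thermal_cooling_device *cdev,
                            struct cooling_spec *c)
{
        if (trip->type != THERMAL_TRIP_PASSIVE)
                return false;

        /* bind over the full cooling-state range with the default weight */
        c->upper = THERMAL_NO_LIMIT;
        c->lower = THERMAL_NO_LIMIT;
        c->weight = THERMAL_WEIGHT_DEFAULT;
        return true;
}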
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 84ff2844df2a..902c20ef495a 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -73,7 +73,7 @@ struct tk_read_base {
* @overflow_seen: Overflow warning flag (DEBUG_TIMEKEEPING)
*
* Note: For timespec(64) based interfaces wall_to_monotonic is what
- * we need to add to xtime (or xtime corrected for sub jiffie times)
+ * we need to add to xtime (or xtime corrected for sub jiffy times)
* to get to monotonic time. Monotonic is pegged at zero at system
* boot time, so wall_to_monotonic will be negative, however, we will
* ALWAYS keep the tv_nsec part positive so we can use the usual
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index e93ee8d936a9..587b96b4418e 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -537,7 +537,7 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
int rc);
void tpm2_end_auth_session(struct tpm_chip *chip);
#else
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
static inline int tpm2_start_auth_session(struct tpm_chip *chip)
{
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 6be396bb4297..93a9f3070b48 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -64,6 +64,13 @@ struct tp_module {
bool trace_module_has_bad_taint(struct module *mod);
extern int register_tracepoint_module_notifier(struct notifier_block *nb);
extern int unregister_tracepoint_module_notifier(struct notifier_block *nb);
+void for_each_module_tracepoint(void (*fct)(struct tracepoint *,
+ struct module *, void *),
+ void *priv);
+void for_each_tracepoint_in_module(struct module *,
+ void (*fct)(struct tracepoint *,
+ struct module *, void *),
+ void *priv);
#else
static inline bool trace_module_has_bad_taint(struct module *mod)
{
@@ -79,6 +86,19 @@ int unregister_tracepoint_module_notifier(struct notifier_block *nb)
{
return 0;
}
+static inline
+void for_each_module_tracepoint(void (*fct)(struct tracepoint *,
+ struct module *, void *),
+ void *priv)
+{
+}
+static inline
+void for_each_tracepoint_in_module(struct module *mod,
+ void (*fct)(struct tracepoint *,
+ struct module *, void *),
+ void *priv)
+{
+}
#endif /* CONFIG_MODULES */
/*
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index d8e4105a2f21..39c7cf82b0c2 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -33,6 +33,13 @@
})
#endif
+#ifdef masked_user_access_begin
+ #define can_do_masked_user_access() 1
+#else
+ #define can_do_masked_user_access() 0
+ #define masked_user_access_begin(src) NULL
+#endif
+
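A hedged sketch of the uaccess fast-path pattern these fallbacks enable (a fragment inside a function; val, from and the Efault label are illustrative, following the usual unsafe_get_user() convention):

        if (can_do_masked_user_access())
                from = masked_user_access_begin(from);
        else if (!user_read_access_begin(from, sizeof(*from)))
                return -EFAULT;

        unsafe_get_user(val, from, Efault);
        user_read_access_end();
        return 0;
Efault:
        user_read_access_end();
        return -EFAULT;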
/*
* Architectures should provide two primitives (raw_copy_{to,from}_user())
* and get rid of their private instances of copy_{to,from}_user() and
diff --git a/include/linux/ubsan.h b/include/linux/ubsan.h
index bff7445498de..d8219cbe09ff 100644
--- a/include/linux/ubsan.h
+++ b/include/linux/ubsan.h
@@ -4,6 +4,11 @@
#ifdef CONFIG_UBSAN_TRAP
const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type);
+#else
+static inline const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type)
+{
+ return NULL;
+}
#endif
#endif
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 7020adedfa08..853f9de5aa05 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -11,6 +11,7 @@
#include <uapi/linux/uio.h>
struct page;
+struct folio_queue;
typedef unsigned int __bitwise iov_iter_extraction_t;
@@ -25,6 +26,7 @@ enum iter_type {
ITER_IOVEC,
ITER_BVEC,
ITER_KVEC,
+ ITER_FOLIOQ,
ITER_XARRAY,
ITER_DISCARD,
};
@@ -66,6 +68,7 @@ struct iov_iter {
const struct iovec *__iov;
const struct kvec *kvec;
const struct bio_vec *bvec;
+ const struct folio_queue *folioq;
struct xarray *xarray;
void __user *ubuf;
};
@@ -74,6 +77,7 @@ struct iov_iter {
};
union {
unsigned long nr_segs;
+ u8 folioq_slot;
loff_t xarray_start;
};
};
@@ -126,6 +130,11 @@ static inline bool iov_iter_is_discard(const struct iov_iter *i)
return iov_iter_type(i) == ITER_DISCARD;
}
+static inline bool iov_iter_is_folioq(const struct iov_iter *i)
+{
+ return iov_iter_type(i) == ITER_FOLIOQ;
+}
+
static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
return iov_iter_type(i) == ITER_XARRAY;
@@ -180,6 +189,12 @@ static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
return copy_page_to_iter(&folio->page, offset, bytes, i);
}
+static inline size_t copy_folio_from_iter(struct folio *folio, size_t offset,
+ size_t bytes, struct iov_iter *i)
+{
+ return copy_page_from_iter(&folio->page, offset, bytes, i);
+}
+
static inline size_t copy_folio_from_iter_atomic(struct folio *folio,
size_t offset, size_t bytes, struct iov_iter *i)
{
@@ -273,6 +288,9 @@ void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
unsigned long nr_segs, size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
+void iov_iter_folio_queue(struct iov_iter *i, unsigned int direction,
+ const struct folio_queue *folioq,
+ unsigned int first_slot, unsigned int offset, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
loff_t start, size_t count);
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
diff --git a/include/asm-generic/unaligned.h b/include/linux/unaligned.h
index a84c64e5f11e..4a9651017e3c 100644
--- a/include/asm-generic/unaligned.h
+++ b/include/linux/unaligned.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_GENERIC_UNALIGNED_H
-#define __ASM_GENERIC_UNALIGNED_H
+#ifndef __LINUX_UNALIGNED_H
+#define __LINUX_UNALIGNED_H
/*
* This is the most generic implementation of unaligned accesses
@@ -8,16 +8,7 @@
*/
#include <linux/unaligned/packed_struct.h>
#include <asm/byteorder.h>
-
-#define __get_unaligned_t(type, ptr) ({ \
- const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
- __pptr->x; \
-})
-
-#define __put_unaligned_t(type, val, ptr) do { \
- struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
- __pptr->x = (val); \
-} while (0)
+#include <vdso/unaligned.h>
#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))
#define put_unaligned(val, ptr) __put_unaligned_t(typeof(*(ptr)), (val), (ptr))
@@ -152,4 +143,4 @@ static inline u64 get_unaligned_be48(const void *p)
return __get_unaligned_be48(p);
}
-#endif /* __ASM_GENERIC_UNALIGNED_H */
+#endif /* __LINUX_UNALIGNED_H */
diff --git a/include/linux/union_find.h b/include/linux/union_find.h
new file mode 100644
index 000000000000..cfd49263c138
--- /dev/null
+++ b/include/linux/union_find.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_UNION_FIND_H
+#define __LINUX_UNION_FIND_H
+/**
+ * union_find.h - union-find data structure implementation
+ *
+ * This header provides functions and structures to implement the union-find
+ * data structure. The union-find data structure is used to manage disjoint
+ * sets and supports efficient union and find operations.
+ *
+ * See Documentation/core-api/union_find.rst for documentation and samples.
+ */
+
+struct uf_node {
+ struct uf_node *parent;
+ unsigned int rank;
+};
+
+/* This macro is used for static initialization of a union-find node. */
+#define UF_INIT_NODE(node) {.parent = &node, .rank = 0}
+
+/**
+ * uf_node_init - Initialize a union-find node
+ * @node: pointer to the union-find node to be initialized
+ *
+ * This function sets the parent of the node to itself and
+ * initializes its rank to 0.
+ */
+static inline void uf_node_init(struct uf_node *node)
+{
+ node->parent = node;
+ node->rank = 0;
+}
+
+/* find the root of a node */
+struct uf_node *uf_find(struct uf_node *node);
+
+/* Merge the sets containing two nodes */
+void uf_union(struct uf_node *node1, struct uf_node *node2);
+
+#endif /* __LINUX_UNION_FIND_H */
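A short usage sketch (variable names and messages are illustrative):

        struct uf_node a = UF_INIT_NODE(a);
        struct uf_node b = UF_INIT_NODE(b);
        struct uf_node c = UF_INIT_NODE(c);

        uf_union(&a, &b);                       /* sets: {a, b} and {c} */

        if (uf_find(&a) == uf_find(&b))
                pr_debug("a and b share a set\n");
        if (uf_find(&a) != uf_find(&c))
                pr_debug("c is still in its own set\n");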
diff --git a/include/linux/unroll.h b/include/linux/unroll.h
new file mode 100644
index 000000000000..d42fd6366373
--- /dev/null
+++ b/include/linux/unroll.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2023 Google LLC.
+ */
+
+#ifndef __UNROLL_H
+#define __UNROLL_H
+
+#include <linux/args.h>
+
+#define UNROLL(N, MACRO, args...) CONCATENATE(__UNROLL_, N)(MACRO, args)
+
+#define __UNROLL_0(MACRO, args...)
+#define __UNROLL_1(MACRO, args...) __UNROLL_0(MACRO, args) MACRO(0, args)
+#define __UNROLL_2(MACRO, args...) __UNROLL_1(MACRO, args) MACRO(1, args)
+#define __UNROLL_3(MACRO, args...) __UNROLL_2(MACRO, args) MACRO(2, args)
+#define __UNROLL_4(MACRO, args...) __UNROLL_3(MACRO, args) MACRO(3, args)
+#define __UNROLL_5(MACRO, args...) __UNROLL_4(MACRO, args) MACRO(4, args)
+#define __UNROLL_6(MACRO, args...) __UNROLL_5(MACRO, args) MACRO(5, args)
+#define __UNROLL_7(MACRO, args...) __UNROLL_6(MACRO, args) MACRO(6, args)
+#define __UNROLL_8(MACRO, args...) __UNROLL_7(MACRO, args) MACRO(7, args)
+#define __UNROLL_9(MACRO, args...) __UNROLL_8(MACRO, args) MACRO(8, args)
+#define __UNROLL_10(MACRO, args...) __UNROLL_9(MACRO, args) MACRO(9, args)
+#define __UNROLL_11(MACRO, args...) __UNROLL_10(MACRO, args) MACRO(10, args)
+#define __UNROLL_12(MACRO, args...) __UNROLL_11(MACRO, args) MACRO(11, args)
+#define __UNROLL_13(MACRO, args...) __UNROLL_12(MACRO, args) MACRO(12, args)
+#define __UNROLL_14(MACRO, args...) __UNROLL_13(MACRO, args) MACRO(13, args)
+#define __UNROLL_15(MACRO, args...) __UNROLL_14(MACRO, args) MACRO(14, args)
+#define __UNROLL_16(MACRO, args...) __UNROLL_15(MACRO, args) MACRO(15, args)
+#define __UNROLL_17(MACRO, args...) __UNROLL_16(MACRO, args) MACRO(16, args)
+#define __UNROLL_18(MACRO, args...) __UNROLL_17(MACRO, args) MACRO(17, args)
+#define __UNROLL_19(MACRO, args...) __UNROLL_18(MACRO, args) MACRO(18, args)
+#define __UNROLL_20(MACRO, args...) __UNROLL_19(MACRO, args) MACRO(19, args)
+
+#endif /* __UNROLL_H */
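A hedged sketch of how the macro might be used; the register layout and CLEAR_REG helper are made up, and writel() is assumed from <linux/io.h>:

#define CLEAR_REG(i, base)      writel(0, (base) + (i) * 4);

static void clear_first_four(void __iomem *base)
{
        /* expands to writel(0, base + 0*4); ... writel(0, base + 3*4); */
        UNROLL(4, CLEAR_REG, base);
}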
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index b503fafb7fb3..2b294bf1881f 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -16,6 +16,7 @@
#include <linux/types.h>
#include <linux/wait.h>
+struct uprobe;
struct vm_area_struct;
struct mm_struct;
struct inode;
@@ -27,22 +28,22 @@ struct page;
#define MAX_URETPROBE_DEPTH 64
-enum uprobe_filter_ctx {
- UPROBE_FILTER_REGISTER,
- UPROBE_FILTER_UNREGISTER,
- UPROBE_FILTER_MMAP,
-};
-
struct uprobe_consumer {
+ /*
+ * handler() can return UPROBE_HANDLER_REMOVE to signal the need to
+ * unregister the uprobe for the current process. If UPROBE_HANDLER_REMOVE
+ * is returned, the filter() callback has to be implemented as well and it
+ * should return false to "confirm" the decision to uninstall the uprobe
+ * for the current process. If filter() is omitted or returns true,
+ * UPROBE_HANDLER_REMOVE is effectively ignored.
+ */
int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs);
int (*ret_handler)(struct uprobe_consumer *self,
unsigned long func,
struct pt_regs *regs);
- bool (*filter)(struct uprobe_consumer *self,
- enum uprobe_filter_ctx ctx,
- struct mm_struct *mm);
+ bool (*filter)(struct uprobe_consumer *self, struct mm_struct *mm);
- struct uprobe_consumer *next;
+ struct list_head cons_node;
};
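A hedged sketch of a consumer honouring this contract; foo_task_is_traced() and foo_mm_is_traced() are hypothetical predicates:

static int foo_handler(struct uprobe_consumer *self, struct pt_regs *regs)
{
        if (!foo_task_is_traced(current))       /* hypothetical predicate */
                return UPROBE_HANDLER_REMOVE;
        return 0;
}

static bool foo_filter(struct uprobe_consumer *self, struct mm_struct *mm)
{
        /* returning false confirms removal for this process */
        return foo_mm_is_traced(mm);            /* hypothetical predicate */
}

static struct uprobe_consumer foo_consumer = {
        .handler = foo_handler,
        .filter  = foo_filter,
};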
#ifdef CONFIG_UPROBES
@@ -76,6 +77,8 @@ struct uprobe_task {
struct uprobe *active_uprobe;
unsigned long xol_vaddr;
+ struct arch_uprobe *auprobe;
+
struct return_instance *return_instances;
unsigned int depth;
};
@@ -110,10 +113,10 @@ extern bool is_trap_insn(uprobe_opcode_t *insn);
extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs);
extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs);
extern int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
-extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
-extern int uprobe_register_refctr(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc);
-extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool);
-extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
+extern struct uprobe *uprobe_register(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc);
+extern int uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool);
+extern void uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc);
+extern void uprobe_unregister_sync(void);
extern int uprobe_mmap(struct vm_area_struct *vma);
extern void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void uprobe_start_dup_mmap(void);
@@ -151,22 +154,21 @@ static inline void uprobes_init(void)
#define uprobe_get_trap_addr(regs) instruction_pointer(regs)
-static inline int
-uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
-{
- return -ENOSYS;
-}
-static inline int uprobe_register_refctr(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc)
+static inline struct uprobe *
+uprobe_register(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc)
{
- return -ENOSYS;
+ return ERR_PTR(-ENOSYS);
}
static inline int
-uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool add)
+uprobe_apply(struct uprobe* uprobe, struct uprobe_consumer *uc, bool add)
{
return -ENOSYS;
}
static inline void
-uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
+uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc)
+{
+}
+static inline void uprobe_unregister_sync(void)
{
}
static inline int uprobe_mmap(struct vm_area_struct *vma)
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 832997a9da0a..672d8fc2abdb 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -495,6 +495,12 @@ struct usb_dev_state;
struct usb_tt;
+enum usb_link_tunnel_mode {
+ USB_LINK_UNKNOWN = 0,
+ USB_LINK_NATIVE,
+ USB_LINK_TUNNELED,
+};
+
enum usb_port_connect_type {
USB_PORT_CONNECT_TYPE_UNKNOWN = 0,
USB_PORT_CONNECT_TYPE_HOT_PLUG,
@@ -605,6 +611,7 @@ struct usb3_lpm_parameters {
* WUSB devices are not, until we authorize them from user space.
* FIXME -- complete doc
* @authenticated: Crypto authentication passed
+ * @tunnel_mode: Connection native or tunneled over USB4
* @lpm_capable: device supports LPM
* @lpm_devinit_allow: Allow USB3 device initiated LPM, exit latency is in range
* @usb2_hw_lpm_capable: device can perform USB2 hardware LPM
@@ -714,6 +721,7 @@ struct usb_device {
unsigned do_remote_wakeup:1;
unsigned reset_resume:1;
unsigned port_is_suspended:1;
+ enum usb_link_tunnel_mode tunnel_mode;
int slot_id;
struct usb2_lpm_parameters l1_params;
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index af3cd2aae4bc..6e38fb9d2117 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -256,7 +256,7 @@ int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f,
struct usb_ep *_ep);
int usb_func_wakeup(struct usb_function *func);
-#define MAX_CONFIG_INTERFACES 16 /* arbitrary; max 255 */
+#define MAX_CONFIG_INTERFACES 32
/**
* struct usb_configuration - represents one gadget configuration
diff --git a/include/linux/usb/func_utils.h b/include/linux/usb/func_utils.h
new file mode 100644
index 000000000000..c8795c965109
--- /dev/null
+++ b/include/linux/usb/func_utils.h
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * func_utils.h
+ *
+ * Utility definitions for USB functions
+ *
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
+ */
+
+#ifndef _FUNC_UTILS_H_
+#define _FUNC_UTILS_H_
+
+#include <linux/usb/gadget.h>
+#include <linux/overflow.h>
+
+/* Variable Length Array Macros **********************************************/
+#define vla_group(groupname) size_t groupname##__next = 0
+#define vla_group_size(groupname) groupname##__next
+
+#define vla_item(groupname, type, name, n) \
+ size_t groupname##_##name##__offset = ({ \
+ size_t offset = 0; \
+ if (groupname##__next != SIZE_MAX) { \
+ size_t align_mask = __alignof__(type) - 1; \
+ size_t size = array_size(n, sizeof(type)); \
+ offset = (groupname##__next + align_mask) & \
+ ~align_mask; \
+ if (check_add_overflow(offset, size, \
+ &groupname##__next)) { \
+ groupname##__next = SIZE_MAX; \
+ offset = 0; \
+ } \
+ } \
+ offset; \
+ })
+
+#define vla_item_with_sz(groupname, type, name, n) \
+ size_t groupname##_##name##__sz = array_size(n, sizeof(type)); \
+ size_t groupname##_##name##__offset = ({ \
+ size_t offset = 0; \
+ if (groupname##__next != SIZE_MAX) { \
+ size_t align_mask = __alignof__(type) - 1; \
+ offset = (groupname##__next + align_mask) & \
+ ~align_mask; \
+ if (check_add_overflow(offset, groupname##_##name##__sz,\
+ &groupname##__next)) { \
+ groupname##__next = SIZE_MAX; \
+ offset = 0; \
+ } \
+ } \
+ offset; \
+ })
+
+#define vla_ptr(ptr, groupname, name) \
+ ((void *) ((char *)ptr + groupname##_##name##__offset))
+
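A hedged sketch of carving one allocation into aligned sub-arrays, in the style of the function drivers these macros came from; the counts and names are illustrative:

        size_t n_reqs = 4, buf_len = 512;

        vla_group(d);
        vla_item(d, struct usb_request *, reqs, n_reqs);
        vla_item(d, u8, buf, buf_len);

        /* vla_group_size() is SIZE_MAX on overflow, so kzalloc() just fails */
        void *mem = kzalloc(vla_group_size(d), GFP_KERNEL);
        if (!mem)
                return -ENOMEM;

        struct usb_request **reqs = vla_ptr(mem, d, reqs);
        u8 *buf = vla_ptr(mem, d, buf);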
+struct usb_ep;
+struct usb_request;
+
+/**
+ * alloc_ep_req - returns a usb_request allocated by the gadget driver and
+ * allocates the request's buffer.
+ *
+ * @ep: the endpoint to allocate a usb_request
+ * @len: suggested size of the usb_request's buffer
+ *
+ * In case @ep direction is OUT, the @len will be aligned to ep's
+ * wMaxPacketSize. In order to avoid memory leaks or drops, *always* use
+ * the usb_request's length (req->length) to refer to the allocated buffer size.
+ * Requests allocated via alloc_ep_req() *must* be freed by free_ep_req().
+ */
+struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len);
+
+/* Frees a usb_request previously allocated by alloc_ep_req() */
+static inline void free_ep_req(struct usb_ep *ep, struct usb_request *req)
+{
+ WARN_ON(req->buf == NULL);
+ kfree(req->buf);
+ req->buf = NULL;
+ usb_ep_free_request(ep, req);
+}
+
+#endif /* _FUNC_UTILS_H_ */
diff --git a/include/linux/usb/gadget_configfs.h b/include/linux/usb/gadget_configfs.h
index d61aebd68128..6b5d6838f865 100644
--- a/include/linux/usb/gadget_configfs.h
+++ b/include/linux/usb/gadget_configfs.h
@@ -4,9 +4,6 @@
#include <linux/configfs.h>
-int check_user_usb_string(const char *name,
- struct usb_gadget_strings *stringtab_dev);
-
#define GS_STRINGS_W(__struct, __name) \
static ssize_t __struct##_##__name##_store(struct config_item *item, \
const char *page, size_t len) \
@@ -37,7 +34,7 @@ static struct configfs_item_operations struct_in##_langid_item_ops = { \
.release = struct_in##_attr_release, \
}; \
\
-static struct config_item_type struct_in##_langid_type = { \
+static const struct config_item_type struct_in##_langid_type = { \
.ct_item_ops = &struct_in##_langid_item_ops, \
.ct_attrs = struct_in##_langid_attrs, \
.ct_owner = THIS_MODULE, \
@@ -94,7 +91,7 @@ static struct configfs_group_operations struct_in##_strings_ops = { \
.drop_item = &struct_in##_strings_drop, \
}; \
\
-static struct config_item_type struct_in##_strings_type = { \
+static const struct config_item_type struct_in##_strings_type = { \
.ct_group_ops = &struct_in##_strings_ops, \
.ct_owner = THIS_MODULE, \
}
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 1a0a4dc87980..75b2b763f1ba 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -311,8 +311,11 @@ struct usb_serial_driver {
#define to_usb_serial_driver(d) \
container_of(d, struct usb_serial_driver, driver)
-int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[],
- const char *name, const struct usb_device_id *id_table);
+#define usb_serial_register_drivers(serial_drivers, name, id_table) \
+ __usb_serial_register_drivers(serial_drivers, THIS_MODULE, name, id_table)
+int __usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[],
+ struct module *owner, const char *name,
+ const struct usb_device_id *id_table);
void usb_serial_deregister_drivers(struct usb_serial_driver *const serial_drivers[]);
void usb_serial_port_softint(struct usb_serial_port *port);
diff --git a/include/linux/usb/tcpci.h b/include/linux/usb/tcpci.h
index 0ab39b6ea205..f7f5cfbdef12 100644
--- a/include/linux/usb/tcpci.h
+++ b/include/linux/usb/tcpci.h
@@ -63,15 +63,12 @@
#define TCPC_ROLE_CTRL 0x1a
#define TCPC_ROLE_CTRL_DRP BIT(6)
-#define TCPC_ROLE_CTRL_RP_VAL_SHIFT 4
-#define TCPC_ROLE_CTRL_RP_VAL_MASK 0x3
+#define TCPC_ROLE_CTRL_RP_VAL GENMASK(5, 4)
#define TCPC_ROLE_CTRL_RP_VAL_DEF 0x0
#define TCPC_ROLE_CTRL_RP_VAL_1_5 0x1
#define TCPC_ROLE_CTRL_RP_VAL_3_0 0x2
-#define TCPC_ROLE_CTRL_CC2_SHIFT 2
-#define TCPC_ROLE_CTRL_CC2_MASK 0x3
-#define TCPC_ROLE_CTRL_CC1_SHIFT 0
-#define TCPC_ROLE_CTRL_CC1_MASK 0x3
+#define TCPC_ROLE_CTRL_CC2 GENMASK(3, 2)
+#define TCPC_ROLE_CTRL_CC1 GENMASK(1, 0)
#define TCPC_ROLE_CTRL_CC_RA 0x0
#define TCPC_ROLE_CTRL_CC_RP 0x1
#define TCPC_ROLE_CTRL_CC_RD 0x2
@@ -92,11 +89,9 @@
#define TCPC_CC_STATUS_TERM BIT(4)
#define TCPC_CC_STATUS_TERM_RP 0
#define TCPC_CC_STATUS_TERM_RD 1
+#define TCPC_CC_STATUS_CC2 GENMASK(3, 2)
+#define TCPC_CC_STATUS_CC1 GENMASK(1, 0)
#define TCPC_CC_STATE_SRC_OPEN 0
-#define TCPC_CC_STATUS_CC2_SHIFT 2
-#define TCPC_CC_STATUS_CC2_MASK 0x3
-#define TCPC_CC_STATUS_CC1_SHIFT 0
-#define TCPC_CC_STATUS_CC1_MASK 0x3
#define TCPC_POWER_STATUS 0x1e
#define TCPC_POWER_STATUS_DBG_ACC_CON BIT(7)
@@ -134,9 +129,8 @@
#define TCPC_MSG_HDR_INFO 0x2e
#define TCPC_MSG_HDR_INFO_DATA_ROLE BIT(3)
+#define TCPC_MSG_HDR_INFO_REV GENMASK(2, 1)
#define TCPC_MSG_HDR_INFO_PWR_ROLE BIT(0)
-#define TCPC_MSG_HDR_INFO_REV_SHIFT 1
-#define TCPC_MSG_HDR_INFO_REV_MASK 0x3
#define TCPC_RX_DETECT 0x2f
#define TCPC_RX_DETECT_HARD_RESET BIT(5)
@@ -154,10 +148,8 @@
#define TCPC_RX_DATA 0x34 /* through 0x4f */
#define TCPC_TRANSMIT 0x50
-#define TCPC_TRANSMIT_RETRY_SHIFT 4
-#define TCPC_TRANSMIT_RETRY_MASK 0x3
-#define TCPC_TRANSMIT_TYPE_SHIFT 0
-#define TCPC_TRANSMIT_TYPE_MASK 0x7
+#define TCPC_TRANSMIT_RETRY GENMASK(5, 4)
+#define TCPC_TRANSMIT_TYPE GENMASK(2, 0)
#define TCPC_TX_BYTE_CNT 0x51
#define TCPC_TX_HDR 0x52
@@ -178,8 +170,7 @@
#define tcpc_presenting_rd(reg, cc) \
(!(TCPC_ROLE_CTRL_DRP & (reg)) && \
- (((reg) & (TCPC_ROLE_CTRL_## cc ##_MASK << TCPC_ROLE_CTRL_## cc ##_SHIFT)) == \
- (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_## cc ##_SHIFT)))
+ FIELD_GET(TCPC_ROLE_CTRL_## cc, reg) == TCPC_ROLE_CTRL_CC_RD)
struct tcpci;
@@ -190,7 +181,7 @@ struct tcpci;
* Optional; Callback to perform chip specific operations when FRS
* is sourcing vbus.
* @auto_discharge_disconnect:
- * Optional; Enables TCPC to autonously discharge vbus on disconnect.
+ * Optional; Enables TCPC to autonomously discharge vbus on disconnect.
* @vbus_vsafe0v:
* optional; Set when TCPC can detect whether vbus is at VSAFE0V.
* @set_partner_usb_comm_capable:
@@ -256,7 +247,7 @@ static inline enum typec_cc_status tcpci_to_typec_cc(unsigned int cc, bool sink)
if (sink)
return TYPEC_CC_RP_3_0;
fallthrough;
- case 0x0:
+ case TCPC_CC_STATE_SRC_OPEN:
default:
return TYPEC_CC_OPEN;
}
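To show the effect of replacing the SHIFT/MASK pairs with GENMASK() fields, a small hypothetical helper built on FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>:

#include <linux/bitfield.h>
#include <linux/usb/tcpci.h>

/* Build a ROLE_CONTROL value presenting Rp (1.5 A) on both CC lines. */
static u8 sample_role_ctrl_rp(void)
{
	return FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL, TCPC_ROLE_CTRL_RP_VAL_1_5) |
	       FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RP) |
	       FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RP);
}

/* Extract the CC1 termination state from a CC_STATUS register read. */
static unsigned int sample_cc1_state(u8 cc_status)
{
	return FIELD_GET(TCPC_CC_STATUS_CC1, cc_status);
}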
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 9f08a584d707..0b9f1e598e3a 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -76,8 +76,23 @@ struct usbnet {
# define EVENT_LINK_CHANGE 11
# define EVENT_SET_RX_MODE 12
# define EVENT_NO_IP_ALIGN 13
+/* This one is special, as it indicates that the device is going away;
+ * there are cyclic dependencies between the tasklet, timer and bh
+ * that must be broken.
+ */
+# define EVENT_UNPLUG 31
};
+static inline bool usbnet_going_away(struct usbnet *ubn)
+{
+ return test_bit(EVENT_UNPLUG, &ubn->flags);
+}
+
+static inline void usbnet_mark_going_away(struct usbnet *ubn)
+{
+ set_bit(EVENT_UNPLUG, &ubn->flags);
+}
+
static inline struct usb_driver *driver_of(struct usb_interface *intf)
{
return to_usb_driver(intf->dev.driver);
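A minimal sketch of the intended use of the new flag (function names are hypothetical): the disconnect path marks the device, and deferred work checks the flag before rearming anything, which breaks the tasklet/timer/bh cycle mentioned above.

#include <linux/usb/usbnet.h>

/* Called early in the USB disconnect path (sketch). */
static void sample_disconnect(struct usbnet *dev)
{
	usbnet_mark_going_away(dev);	/* sets EVENT_UNPLUG in dev->flags */
	/* ... the usual teardown continues here ... */
}

/* Called from timer/tasklet/work context (sketch). */
static void sample_deferred_kick(struct usbnet *dev)
{
	if (usbnet_going_away(dev))
		return;			/* do not rearm timers or reschedule work */
	/* ... reschedule RX/TX processing as usual ... */
}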
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 6030a8235617..3625096d5f85 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -21,9 +21,11 @@ struct uid_gid_extent {
};
struct uid_gid_map { /* 64 bytes -- 1 cache line */
- u32 nr_extents;
union {
- struct uid_gid_extent extent[UID_GID_MAP_MAX_BASE_EXTENTS];
+ struct {
+ struct uid_gid_extent extent[UID_GID_MAP_MAX_BASE_EXTENTS];
+ u32 nr_extents;
+ };
struct {
struct uid_gid_extent *forward;
struct uid_gid_extent *reverse;
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index a12bcf042551..9fc6ce15c499 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -267,6 +267,25 @@ extern void userfaultfd_unmap_complete(struct mm_struct *mm,
extern bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma);
extern bool userfaultfd_wp_async(struct vm_area_struct *vma);
+void userfaultfd_reset_ctx(struct vm_area_struct *vma);
+
+struct vm_area_struct *userfaultfd_clear_vma(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end);
+
+int userfaultfd_register_range(struct userfaultfd_ctx *ctx,
+ struct vm_area_struct *vma,
+ unsigned long vm_flags,
+ unsigned long start, unsigned long end,
+ bool wp_async);
+
+void userfaultfd_release_new(struct userfaultfd_ctx *ctx);
+
+void userfaultfd_release_all(struct mm_struct *mm,
+ struct userfaultfd_ctx *ctx);
+
#else /* CONFIG_USERFAULTFD */
/* mm helpers */
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index 7977ca03ac7a..2e7a30fe6b92 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -582,11 +582,20 @@ void vdpa_set_status(struct vdpa_device *vdev, u8 status);
* @dev: vdpa device to remove
* Driver need to remove the specified device by calling
* _vdpa_unregister_device().
+ * @dev_set_attr: change a vdpa device's attributes after it was created
+ * @mdev: parent device to use for device
+ * @dev: vdpa device structure
+ * @config: Attributes to be set for the device.
+ * The driver needs to check the mask of the structure and then set
+ * the related information to the vdpa device. The driver must return 0
+ * if set successfully.
*/
struct vdpa_mgmtdev_ops {
int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name,
const struct vdpa_dev_set_config *config);
void (*dev_del)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev);
+ int (*dev_set_attr)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev,
+ const struct vdpa_dev_set_config *config);
};
/**
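A hedged sketch of a management-device ops table wiring in the new callback; only the mask check is shown, the attribute actually applied is hypothetical, and the uapi attribute constant is assumed to come from <uapi/linux/vdpa.h>.

#include <linux/bits.h>
#include <linux/vdpa.h>
#include <uapi/linux/vdpa.h>	/* VDPA_ATTR_DEV_NET_CFG_MACADDR (assumed include) */

static int sample_dev_set_attr(struct vdpa_mgmt_dev *mdev,
			       struct vdpa_device *dev,
			       const struct vdpa_dev_set_config *config)
{
	/* Only touch attributes the caller actually set in the mask. */
	if (config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR)) {
		/* hypothetical: push config->net.mac down to the device */
	}

	return 0;	/* must return 0 on success */
}

static const struct vdpa_mgmtdev_ops sample_mgmtdev_ops = {
	/* .dev_add and .dev_del are mandatory in a real driver, omitted here */
	.dev_set_attr	= sample_dev_set_attr,
};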
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 276ca543ef44..02a9f4dc594d 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -103,8 +103,10 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
if (!skb_partial_csum_set(skb, start, off))
return -EINVAL;
+ if (skb_transport_offset(skb) < nh_min_len)
+ return -EINVAL;
- nh_min_len = max_t(u32, nh_min_len, skb_transport_offset(skb));
+ nh_min_len = skb_transport_offset(skb);
p_off = nh_min_len + thlen;
if (!pskb_may_pull(skb, p_off))
return -EINVAL;
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 747943bc8cc2..aed952d04132 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -50,6 +50,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
PGSTEAL_ANON,
PGSTEAL_FILE,
#ifdef CONFIG_NUMA
+ PGSCAN_ZONE_RECLAIM_SUCCESS,
PGSCAN_ZONE_RECLAIM_FAILED,
#endif
PGINODESTEAL, SLABS_SCANNED, KSWAPD_INODESTEAL,
@@ -104,6 +105,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
THP_SPLIT_PAGE,
THP_SPLIT_PAGE_FAILED,
THP_DEFERRED_SPLIT_PAGE,
+ THP_UNDERUSED_SPLIT_PAGE,
THP_SPLIT_PMD,
THP_SCAN_EXCEED_NONE_PTE,
THP_SCAN_EXCEED_SWAP_PTE,
@@ -154,6 +156,30 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
VMA_LOCK_RETRY,
VMA_LOCK_MISS,
#endif
+#ifdef CONFIG_DEBUG_STACK_USAGE
+ KSTACK_1K,
+#if THREAD_SIZE > 1024
+ KSTACK_2K,
+#endif
+#if THREAD_SIZE > 2048
+ KSTACK_4K,
+#endif
+#if THREAD_SIZE > 4096
+ KSTACK_8K,
+#endif
+#if THREAD_SIZE > 8192
+ KSTACK_16K,
+#endif
+#if THREAD_SIZE > 16384
+ KSTACK_32K,
+#endif
+#if THREAD_SIZE > 32768
+ KSTACK_64K,
+#endif
+#if THREAD_SIZE > 65536
+ KSTACK_REST,
+#endif
+#endif /* CONFIG_DEBUG_STACK_USAGE */
NR_VM_EVENT_ITEMS
};
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index e4a631ec430b..ad2ce7a6ab7a 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -189,6 +189,10 @@ extern void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1
extern void *vcalloc_noprof(size_t n, size_t size) __alloc_size(1, 2);
#define vcalloc(...) alloc_hooks(vcalloc_noprof(__VA_ARGS__))
+void * __must_check vrealloc_noprof(const void *p, size_t size, gfp_t flags)
+ __realloc_size(2);
+#define vrealloc(...) alloc_hooks(vrealloc_noprof(__VA_ARGS__))
+
extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
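A short usage sketch for the new helper (buffer and function names are hypothetical): presumably following krealloc() semantics, the old pointer stays valid if the call fails, so only overwrite it on success.

#include <linux/vmalloc.h>

static int sample_grow_buffer(void **bufp, size_t new_size)
{
	void *tmp = vrealloc(*bufp, new_size, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;	/* *bufp is untouched and still valid */

	*bufp = tmp;
	return 0;
}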
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 9eb77c9007e6..d2761bf8ff32 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -32,6 +32,7 @@ struct reclaim_stat {
unsigned nr_ref_keep;
unsigned nr_unmap_fail;
unsigned nr_lazyfree_fail;
+ unsigned nr_demoted;
};
/* Stat data for system wide items */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 4eb8f9563136..59c2695e12e7 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -507,6 +507,47 @@ void workqueue_softirq_dead(unsigned int cpu);
__printf(1, 4) struct workqueue_struct *
alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
+#ifdef CONFIG_LOCKDEP
+/**
+ * alloc_workqueue_lockdep_map - allocate a workqueue with user-defined lockdep_map
+ * @fmt: printf format for the name of the workqueue
+ * @flags: WQ_* flags
+ * @max_active: max in-flight work items, 0 for default
+ * @lockdep_map: user-defined lockdep_map
+ * @...: args for @fmt
+ *
+ * Same as alloc_workqueue but with a user-defined lockdep_map. Useful for
+ * workqueues created with the same purpose and to avoid leaking a lockdep_map
+ * on each workqueue creation.
+ *
+ * RETURNS:
+ * Pointer to the allocated workqueue on success, %NULL on failure.
+ */
+__printf(1, 5) struct workqueue_struct *
+alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active,
+ struct lockdep_map *lockdep_map, ...);
+
+/**
+ * alloc_ordered_workqueue_lockdep_map - allocate an ordered workqueue with
+ * user-defined lockdep_map
+ *
+ * @fmt: printf format for the name of the workqueue
+ * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
+ * @lockdep_map: user-defined lockdep_map
+ * @args: args for @fmt
+ *
+ * Same as alloc_ordered_workqueue but with a user-defined lockdep_map.
+ * Useful for workqueues created with the same purpose and to avoid leaking a
+ * lockdep_map on each workqueue creation.
+ *
+ * RETURNS:
+ * Pointer to the allocated workqueue on success, %NULL on failure.
+ */
+#define alloc_ordered_workqueue_lockdep_map(fmt, flags, lockdep_map, args...) \
+ alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), \
+ 1, lockdep_map, ##args)
+#endif
+
/**
* alloc_ordered_workqueue - allocate an ordered workqueue
* @fmt: printf format for the name of the workqueue
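A hedged sketch of the intended pattern: workqueues created repeatedly for the same purpose share one statically initialised lockdep_map instead of leaking a fresh one per allocation. STATIC_LOCKDEP_MAP_INIT() from <linux/lockdep.h> is assumed here as the usual way to set up such a map.

#include <linux/lockdep.h>
#include <linux/workqueue.h>

#ifdef CONFIG_LOCKDEP
static struct lock_class_key sample_wq_key;
static struct lockdep_map sample_wq_lockdep_map =
	STATIC_LOCKDEP_MAP_INIT("sample_wq", &sample_wq_key);

static struct workqueue_struct *sample_create_wq(int id)
{
	/* every instance shares sample_wq_lockdep_map */
	return alloc_ordered_workqueue_lockdep_map("sample_wq/%d",
						   WQ_MEM_RECLAIM,
						   &sample_wq_lockdep_map, id);
}
#endif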
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 1a54676d843a..d6db822e4bb3 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -79,6 +79,9 @@ struct writeback_control {
*/
struct swap_iocb **swap_plug;
+ /* Target list for splitting a large folio */
+ struct list_head *list;
+
/* internal fields used by the ->writepages implementation: */
struct folio_batch fbatch;
pgoff_t index;
@@ -200,7 +203,8 @@ void inode_io_list_del(struct inode *inode);
/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
{
- wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE);
+ wait_var_event(inode_state_wait_address(inode, __I_NEW),
+ !(READ_ONCE(inode->i_state) & I_NEW));
}
#ifdef CONFIG_CGROUP_WRITEBACK
@@ -217,7 +221,7 @@ void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
size_t bytes);
int cgroup_writeback_by_id(u64 bdi_id, int memcg_id,
enum wb_reason reason, struct wb_completion *done);
-void cgroup_writeback_umount(void);
+void cgroup_writeback_umount(struct super_block *sb);
bool cleanup_offline_cgwb(struct bdi_writeback *wb);
/**
@@ -324,7 +328,7 @@ static inline void wbc_account_cgroup_owner(struct writeback_control *wbc,
{
}
-static inline void cgroup_writeback_umount(void)
+static inline void cgroup_writeback_umount(struct super_block *sb)
{
}
diff --git a/include/linux/xz.h b/include/linux/xz.h
index 7285ca5d56e9..58ae1d746c6f 100644
--- a/include/linux/xz.h
+++ b/include/linux/xz.h
@@ -1,11 +1,10 @@
+/* SPDX-License-Identifier: 0BSD */
+
/*
* XZ decompressor
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <https://7-zip.org/>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#ifndef XZ_H
@@ -19,11 +18,6 @@
# include <stdint.h>
#endif
-/* In Linux, this is used to make extern functions static when needed. */
-#ifndef XZ_EXTERN
-# define XZ_EXTERN extern
-#endif
-
/**
* enum xz_mode - Operation mode
*
@@ -143,7 +137,7 @@ struct xz_buf {
size_t out_size;
};
-/**
+/*
* struct xz_dec - Opaque type to hold the XZ decoder state
*/
struct xz_dec;
@@ -191,7 +185,7 @@ struct xz_dec;
* ready to be used with xz_dec_run(). If memory allocation fails,
* xz_dec_init() returns NULL.
*/
-XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max);
+struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max);
/**
* xz_dec_run() - Run the XZ decoder
@@ -211,7 +205,7 @@ XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max);
* get that amount valid data from the beginning of the stream. You must use
* the multi-call decoder if you don't want to uncompress the whole stream.
*/
-XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b);
+enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b);
/**
* xz_dec_reset() - Reset an already allocated decoder state
@@ -224,32 +218,38 @@ XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b);
* xz_dec_run(). Thus, explicit call to xz_dec_reset() is useful only in
* multi-call mode.
*/
-XZ_EXTERN void xz_dec_reset(struct xz_dec *s);
+void xz_dec_reset(struct xz_dec *s);
/**
* xz_dec_end() - Free the memory allocated for the decoder state
* @s: Decoder state allocated using xz_dec_init(). If s is NULL,
* this function does nothing.
*/
-XZ_EXTERN void xz_dec_end(struct xz_dec *s);
+void xz_dec_end(struct xz_dec *s);
-/*
- * Decompressor for MicroLZMA, an LZMA variant with a very minimal header.
- * See xz_dec_microlzma_alloc() below for details.
+/**
+ * DOC: MicroLZMA decompressor
+ *
+ * This MicroLZMA header format was created for use in EROFS but may be used
+ * by others too. **In most cases one needs the XZ APIs above instead.**
*
- * These functions aren't used or available in preboot code and thus aren't
- * marked with XZ_EXTERN. This avoids warnings about static functions that
- * are never defined.
+ * The compressed format supported by this decoder is a raw LZMA stream
+ * whose first byte (always 0x00) has been replaced with bitwise-negation
+ * of the LZMA properties (lc/lp/pb) byte. For example, if lc/lp/pb is
+ * 3/0/2, the first byte is 0xA2. This way the first byte can never be 0x00.
+ * Just like with LZMA2, lc + lp <= 4 must be true. The LZMA end-of-stream
+ * marker must not be used. The unused values are reserved for future use.
*/
-/**
+
+/*
* struct xz_dec_microlzma - Opaque type to hold the MicroLZMA decoder state
*/
struct xz_dec_microlzma;
/**
* xz_dec_microlzma_alloc() - Allocate memory for the MicroLZMA decoder
- * @mode XZ_SINGLE or XZ_PREALLOC
- * @dict_size LZMA dictionary size. This must be at least 4 KiB and
+ * @mode: XZ_SINGLE or XZ_PREALLOC
+ * @dict_size: LZMA dictionary size. This must be at least 4 KiB and
* at most 3 GiB.
*
* In contrast to xz_dec_init(), this function only allocates the memory
@@ -262,40 +262,30 @@ struct xz_dec_microlzma;
* On success, xz_dec_microlzma_alloc() returns a pointer to
* struct xz_dec_microlzma. If memory allocation fails or
* dict_size is invalid, NULL is returned.
- *
- * The compressed format supported by this decoder is a raw LZMA stream
- * whose first byte (always 0x00) has been replaced with bitwise-negation
- * of the LZMA properties (lc/lp/pb) byte. For example, if lc/lp/pb is
- * 3/0/2, the first byte is 0xA2. This way the first byte can never be 0x00.
- * Just like with LZMA2, lc + lp <= 4 must be true. The LZMA end-of-stream
- * marker must not be used. The unused values are reserved for future use.
- * This MicroLZMA header format was created for use in EROFS but may be used
- * by others too.
*/
-extern struct xz_dec_microlzma *xz_dec_microlzma_alloc(enum xz_mode mode,
- uint32_t dict_size);
+struct xz_dec_microlzma *xz_dec_microlzma_alloc(enum xz_mode mode,
+ uint32_t dict_size);
/**
* xz_dec_microlzma_reset() - Reset the MicroLZMA decoder state
- * @s Decoder state allocated using xz_dec_microlzma_alloc()
- * @comp_size Compressed size of the input stream
- * @uncomp_size Uncompressed size of the input stream. A value smaller
+ * @s: Decoder state allocated using xz_dec_microlzma_alloc()
+ * @comp_size: Compressed size of the input stream
+ * @uncomp_size: Uncompressed size of the input stream. A value smaller
* than the real uncompressed size of the input stream can
* be specified if uncomp_size_is_exact is set to false.
* uncomp_size can never be set to a value larger than the
* expected real uncompressed size because it would eventually
* result in XZ_DATA_ERROR.
- * @uncomp_size_is_exact This is an int instead of bool to avoid
+ * @uncomp_size_is_exact: This is an int instead of bool to avoid
* requiring stdbool.h. This should normally be set to true.
* When this is set to false, error detection is weaker.
*/
-extern void xz_dec_microlzma_reset(struct xz_dec_microlzma *s,
- uint32_t comp_size, uint32_t uncomp_size,
- int uncomp_size_is_exact);
+void xz_dec_microlzma_reset(struct xz_dec_microlzma *s, uint32_t comp_size,
+ uint32_t uncomp_size, int uncomp_size_is_exact);
/**
* xz_dec_microlzma_run() - Run the MicroLZMA decoder
- * @s Decoder state initialized using xz_dec_microlzma_reset()
+ * @s: Decoder state initialized using xz_dec_microlzma_reset()
* @b: Input and output buffers
*
* This works similarly to xz_dec_run() with a few important differences.
@@ -329,15 +319,14 @@ extern void xz_dec_microlzma_reset(struct xz_dec_microlzma *s,
* may be changed normally like with XZ_PREALLOC. This way input data can be
* provided from non-contiguous memory.
*/
-extern enum xz_ret xz_dec_microlzma_run(struct xz_dec_microlzma *s,
- struct xz_buf *b);
+enum xz_ret xz_dec_microlzma_run(struct xz_dec_microlzma *s, struct xz_buf *b);
/**
* xz_dec_microlzma_end() - Free the memory allocated for the decoder state
* @s: Decoder state allocated using xz_dec_microlzma_alloc().
* If s is NULL, this function does nothing.
*/
-extern void xz_dec_microlzma_end(struct xz_dec_microlzma *s);
+void xz_dec_microlzma_end(struct xz_dec_microlzma *s);
/*
* Standalone build (userspace build or in-kernel build for boot time use)
@@ -358,13 +347,13 @@ extern void xz_dec_microlzma_end(struct xz_dec_microlzma *s);
* This must be called before any other xz_* function to initialize
* the CRC32 lookup table.
*/
-XZ_EXTERN void xz_crc32_init(void);
+void xz_crc32_init(void);
/*
* Update CRC32 value using the polynomial from IEEE-802.3. To start a new
* calculation, the third argument must be zero. To continue the calculation,
* the previously returned value is passed as the third argument.
*/
-XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc);
+uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc);
#endif
#endif
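For orientation, a minimal single-call decompression sketch against the API above (XZ_SINGLE mode, hypothetical buffers). With XZ_EXTERN gone these are plain extern functions now.

#include <linux/xz.h>

static int sample_xz_decompress(const u8 *in, size_t in_size,
				u8 *out, size_t out_size)
{
	struct xz_buf b = {
		.in = in,   .in_pos = 0,  .in_size = in_size,
		.out = out, .out_pos = 0, .out_size = out_size,
	};
	struct xz_dec *dec;
	enum xz_ret ret;

	/* standalone builds must call xz_crc32_init() first; the normal
	 * kernel build uses the kernel's own CRC32 implementation */
	dec = xz_dec_init(XZ_SINGLE, 0);	/* dict_max is ignored in single-call mode */
	if (!dec)
		return -ENOMEM;

	ret = xz_dec_run(dec, &b);		/* decodes the whole stream in one call */
	xz_dec_end(dec);

	return ret == XZ_STREAM_END ? 0 : -EINVAL;
}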
diff --git a/include/linux/zstd.h b/include/linux/zstd.h
index 113408eef6ec..b2c7cf310c8f 100644
--- a/include/linux/zstd.h
+++ b/include/linux/zstd.h
@@ -77,6 +77,30 @@ int zstd_min_clevel(void);
*/
int zstd_max_clevel(void);
+/**
+ * zstd_default_clevel() - default compression level
+ *
+ * Return: Default compression level.
+ */
+int zstd_default_clevel(void);
+
+/**
+ * struct zstd_custom_mem - custom memory allocation
+ */
+typedef ZSTD_customMem zstd_custom_mem;
+
+/**
+ * struct zstd_dict_load_method - Dictionary load method.
+ * See zstd_lib.h.
+ */
+typedef ZSTD_dictLoadMethod_e zstd_dict_load_method;
+
+/**
+ * struct zstd_dict_content_type - Dictionary content type.
+ * See zstd_lib.h.
+ */
+typedef ZSTD_dictContentType_e zstd_dict_content_type;
+
/* ====== Parameter Selection ====== */
/**
@@ -136,6 +160,19 @@ typedef ZSTD_parameters zstd_parameters;
zstd_parameters zstd_get_params(int level,
unsigned long long estimated_src_size);
+
+/**
+ * zstd_get_cparams() - returns zstd_compression_parameters for selected level
+ * @level: The compression level
+ * @estimated_src_size: The estimated source size to compress or 0
+ * if unknown.
+ * @dict_size: Dictionary size.
+ *
+ * Return: The selected zstd_compression_parameters.
+ */
+zstd_compression_parameters zstd_get_cparams(int level,
+ unsigned long long estimated_src_size, size_t dict_size);
+
/* ====== Single-pass Compression ====== */
typedef ZSTD_CCtx zstd_cctx;
@@ -180,6 +217,71 @@ zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size);
size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
const void *src, size_t src_size, const zstd_parameters *parameters);
+/**
+ * zstd_create_cctx_advanced() - Create compression context
+ * @custom_mem: Custom allocator.
+ *
+ * Return: NULL on error, pointer to compression context otherwise.
+ */
+zstd_cctx *zstd_create_cctx_advanced(zstd_custom_mem custom_mem);
+
+/**
+ * zstd_free_cctx() - Free compression context
+ * @cctx: Pointer to compression context.
+ *
+ * Return: Always 0.
+ */
+size_t zstd_free_cctx(zstd_cctx* cctx);
+
+/**
+ * struct zstd_cdict - Compression dictionary.
+ * See zstd_lib.h.
+ */
+typedef ZSTD_CDict zstd_cdict;
+
+/**
+ * zstd_create_cdict_byreference() - Create compression dictionary
+ * @dict: Pointer to dictionary buffer.
+ * @dict_size: Size of the dictionary buffer.
+ * @cparams: Compression parameters, e.g. from
+ * zstd_get_cparams().
+ * @custom_mem: Memory allocator.
+ *
+ * Note, this uses @dict by reference (ZSTD_dlm_byRef), so @dict must not be
+ * freed before the zstd_cdict is destroyed.
+ *
+ * Return: NULL on error, pointer to compression dictionary
+ * otherwise.
+ */
+zstd_cdict *zstd_create_cdict_byreference(const void *dict, size_t dict_size,
+ zstd_compression_parameters cparams,
+ zstd_custom_mem custom_mem);
+
+/**
+ * zstd_free_cdict() - Free compression dictionary
+ * @cdict: Pointer to compression dictionary.
+ *
+ * Return: Always 0.
+ */
+size_t zstd_free_cdict(zstd_cdict* cdict);
+
+/**
+ * zstd_compress_using_cdict() - compress src into dst using a dictionary
+ * @cctx: The context. Must have been initialized with zstd_init_cctx().
+ * @dst: The buffer to compress src into.
+ * @dst_capacity: The size of the destination buffer. May be any size, but
+ * ZSTD_compressBound(srcSize) is guaranteed to be large enough.
+ * @src: The data to compress.
+ * @src_size: The size of the data to compress.
+ * @cdict: The dictionary to be used.
+ *
+ * Return: The compressed size or an error, which can be checked using
+ * zstd_is_error().
+ */
+size_t zstd_compress_using_cdict(zstd_cctx *cctx, void *dst,
+ size_t dst_capacity, const void *src, size_t src_size,
+ const zstd_cdict *cdict);
+
/* ====== Single-pass Decompression ====== */
typedef ZSTD_DCtx zstd_dctx;
@@ -220,6 +322,71 @@ zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size);
size_t zstd_decompress_dctx(zstd_dctx *dctx, void *dst, size_t dst_capacity,
const void *src, size_t src_size);
+/**
+ * struct zstd_ddict - Decompression dictionary.
+ * See zstd_lib.h.
+ */
+typedef ZSTD_DDict zstd_ddict;
+
+/**
+ * zstd_create_ddict_byreference() - Create decompression dictionary
+ * @dict: Pointer to dictionary buffer.
+ * @dict_size: Size of the dictionary buffer.
+ * @dict_load_method: Dictionary load method.
+ * @dict_content_type: Dictionary content type.
+ * @custom_mem: Memory allocator.
+ *
+ * Note, this uses @dict by reference (ZSTD_dlm_byRef), so @dict must not be
+ * freed before the zstd_ddict is destroyed.
+ *
+ * Return: NULL on error, pointer to decompression dictionary
+ * otherwise.
+ */
+zstd_ddict *zstd_create_ddict_byreference(const void *dict, size_t dict_size,
+ zstd_custom_mem custom_mem);
+/**
+ * zstd_free_ddict() - Free decompression dictionary
+ * @ddict: Pointer to the decompression dictionary.
+ *
+ * Return: Always 0.
+ */
+size_t zstd_free_ddict(zstd_ddict *ddict);
+
+/**
+ * zstd_create_dctx_advanced() - Create decompression context
+ * @custom_mem: Custom allocator.
+ *
+ * Return: NULL on error, pointer to decompression context otherwise.
+ */
+zstd_dctx *zstd_create_dctx_advanced(zstd_custom_mem custom_mem);
+
+/**
+ * zstd_free_dctx() - Free decompression context
+ * @dctx: Pointer to decompression context.
+ * Return: Always 0.
+ */
+size_t zstd_free_dctx(zstd_dctx *dctx);
+
+/**
+ * zstd_decompress_using_ddict() - decompress src into dst using a dictionary
+ * @dctx: The decompression context.
+ * @dst: The buffer to decompress src into.
+ * @dst_capacity: The size of the destination buffer. Must be at least as large
+ * as the decompressed size. If the caller cannot upper bound the
+ * decompressed size, then it's better to use the streaming API.
+ * @src: The zstd compressed data to decompress. Multiple concatenated
+ * frames and skippable frames are allowed.
+ * @src_size: The exact size of the data to decompress.
+ * @ddict: The dictionary to be used.
+ *
+ * Return: The decompressed size or an error, which can be checked using
+ * zstd_is_error().
+ */
+size_t zstd_decompress_using_ddict(zstd_dctx *dctx,
+ void *dst, size_t dst_capacity, const void *src, size_t src_size,
+ const zstd_ddict *ddict);
+
+
/* ====== Streaming Buffers ====== */
/**
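A hedged sketch of the dictionary round-trip added above, using the by-reference cdict creator; the allocator is passed in by the (hypothetical) caller and error handling is abbreviated.

#include <linux/errno.h>
#include <linux/zstd.h>

static int sample_compress_with_dict(zstd_custom_mem mem, void *dst,
				     size_t dst_cap, const void *src,
				     size_t src_size, const void *dict,
				     size_t dict_size, size_t *out_len)
{
	zstd_compression_parameters cparams;
	zstd_cctx *cctx;
	zstd_cdict *cdict;
	size_t ret;
	int err = -ENOMEM;

	cparams = zstd_get_cparams(zstd_default_clevel(), src_size, dict_size);
	cctx = zstd_create_cctx_advanced(mem);
	cdict = zstd_create_cdict_byreference(dict, dict_size, cparams, mem);
	if (!cctx || !cdict)
		goto free;

	/* @dict is used by reference: keep it valid until zstd_free_cdict() */
	ret = zstd_compress_using_cdict(cctx, dst, dst_cap, src, src_size, cdict);
	if (zstd_is_error(ret)) {
		err = -EINVAL;
		goto free;
	}
	*out_len = ret;
	err = 0;
free:
	zstd_free_cdict(cdict);	/* assumed to tolerate NULL like upstream zstd */
	zstd_free_cctx(cctx);
	return err;
}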
diff --git a/include/linux/zswap.h b/include/linux/zswap.h
index 6cecb4a4f68b..9cd1beef0654 100644
--- a/include/linux/zswap.h
+++ b/include/linux/zswap.h
@@ -13,17 +13,15 @@ extern atomic_t zswap_stored_pages;
struct zswap_lruvec_state {
/*
- * Number of pages in zswap that should be protected from the shrinker.
- * This number is an estimate of the following counts:
+ * Number of pages swapped in from disk, i.e. not found in the zswap pool.
*
- * a) Recent page faults.
- * b) Recent insertion to the zswap LRU. This includes new zswap stores,
- * as well as recent zswap LRU rotations.
- *
- * These pages are likely to be warm, and might incur IO if the are written
- * to swap.
+ * This is consumed and subtracted from the lru size in
+ * zswap_shrinker_count() to penalize past overshrinking that led to disk
+ * swapins. The idea is that had we considered this many more pages in the
+ * LRU active/protected and not written them back, we would not have had to
+ * swap them in.
*/
- atomic_long_t nr_zswap_protected;
+ atomic_long_t nr_disk_swapins;
};
unsigned long zswap_total_pages(void);
diff --git a/include/media/cec.h b/include/media/cec.h
index d131514032f2..16b412b3131b 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -66,6 +66,8 @@ struct cec_data {
struct list_head xfer_list;
struct cec_adapter *adap;
struct cec_msg msg;
+ u8 match_len;
+ u8 match_reply[5];
struct cec_fh *fh;
struct delayed_work work;
struct completion c;
@@ -296,6 +298,37 @@ struct cec_adapter {
char input_phys[40];
};
+static inline int cec_get_device(struct cec_adapter *adap)
+{
+ struct cec_devnode *devnode = &adap->devnode;
+
+ /*
+ * Check if the cec device is available. This needs to be done with
+ * the devnode->lock held to prevent an open/unregister race:
+ * without the lock, the device could be unregistered and freed between
+ * the devnode->registered check and get_device() calls, leading to
+ * a crash.
+ */
+ mutex_lock(&devnode->lock);
+ /*
+ * return -ENODEV if the cec device has been removed
+ * already or if it is not registered anymore.
+ */
+ if (!devnode->registered) {
+ mutex_unlock(&devnode->lock);
+ return -ENODEV;
+ }
+ /* and increase the device refcount */
+ get_device(&devnode->dev);
+ mutex_unlock(&devnode->lock);
+ return 0;
+}
+
+static inline void cec_put_device(struct cec_adapter *adap)
+{
+ put_device(&adap->devnode.dev);
+}
+
static inline void *cec_get_drvdata(const struct cec_adapter *adap)
{
return adap->priv;
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 803349599c27..d095908073ef 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -127,7 +127,6 @@ struct lirc_fh {
* @min_timeout: minimum timeout supported by device
* @max_timeout: maximum timeout supported by device
* @rx_resolution : resolution (in us) of input sampler
- * @tx_resolution: resolution (in us) of output sampler
* @lirc_dev: lirc device
* @lirc_cdev: lirc char cdev
* @gap_start: start time for gap after timeout if non-zero
@@ -194,7 +193,6 @@ struct rc_dev {
u32 min_timeout;
u32 max_timeout;
u32 rx_resolution;
- u32 tx_resolution;
#ifdef CONFIG_LIRC
struct device lirc_dev;
struct cdev lirc_cdev;
diff --git a/include/media/v4l2-mc.h b/include/media/v4l2-mc.h
index ed0a44b6eada..1837c9fd78cf 100644
--- a/include/media/v4l2-mc.h
+++ b/include/media/v4l2-mc.h
@@ -178,6 +178,9 @@ void v4l2_pipeline_pm_put(struct media_entity *entity);
* @flags: New link flags that will be applied
* @notification: The link's state change notification type (MEDIA_DEV_NOTIFY_*)
*
+ * THIS FUNCTION IS DEPRECATED. DO NOT USE IN NEW DRIVERS. USE RUNTIME PM
+ * ON SUB-DEVICE DRIVERS INSTEAD.
+ *
* React to link management on powered pipelines by updating the use count of
* all entities in the source and sink sides of the link. Entities are powered
* on or off accordingly. The use of this function should be paired
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index bd235d325ff9..8daa0929865c 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -1250,6 +1250,12 @@ int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
* calls v4l2_subdev_link_validate_default() to ensure that
* width, height and the media bus pixel code are equal on both
* source and sink of the link.
+ *
+ * The function can be used as a drop-in &media_entity_ops.link_validate
+ * implementation for v4l2_subdev instances. It supports all links between
+ * subdevs, as well as links between subdevs and video devices, provided that
+ * the video devices also implement their &media_entity_ops.link_validate
+ * operation.
*/
int v4l2_subdev_link_validate(struct media_link *link);
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 955237ac503d..9b02aeba4108 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -154,6 +154,8 @@ struct vb2_mem_ops {
* @mem_priv: private data with this plane.
* @dbuf: dma_buf - shared buffer object.
* @dbuf_mapped: flag to show whether dbuf is mapped or not
+ * @dbuf_duplicated: boolean to show whether dbuf is duplicated with a
+ * previous plane of the buffer.
* @bytesused: number of bytes occupied by data in the plane (payload).
* @length: size of this plane (NOT the payload) in bytes. The maximum
* valid size is MAX_UINT - PAGE_SIZE.
@@ -179,6 +181,7 @@ struct vb2_plane {
void *mem_priv;
struct dma_buf *dbuf;
unsigned int dbuf_mapped;
+ bool dbuf_duplicated;
unsigned int bytesused;
unsigned int length;
unsigned int min_length;
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 313d0b972e06..d9c767cf773d 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -27,7 +27,7 @@
#ifndef __L2CAP_H
#define __L2CAP_H
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/atomic.h>
/* L2CAP defaults */
diff --git a/include/net/calipso.h b/include/net/calipso.h
index f8667a3fda9e..76b9e08c10c2 100644
--- a/include/net/calipso.h
+++ b/include/net/calipso.h
@@ -25,7 +25,7 @@
#include <net/netlabel.h>
#include <net/request_sock.h>
#include <linux/refcount.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
/* known doi values */
#define CALIPSO_DOI_UNKNOWN 0x00000000
diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
index c9111bb2f59b..d6780d7903f4 100644
--- a/include/net/cipso_ipv4.h
+++ b/include/net/cipso_ipv4.h
@@ -28,7 +28,7 @@
#include <net/request_sock.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
/* known doi values */
#define CIPSO_V4_DOI_UNKNOWN 0x00000000
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 9ab49bfeae78..c1d91f1d20f6 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -531,13 +531,12 @@ static inline int genlmsg_multicast(const struct genl_family *family,
* @skb: netlink message as socket buffer
* @portid: own netlink portid to avoid sending to yourself
* @group: offset of multicast group in groups array
- * @flags: allocation flags
*
* This function must hold the RTNL or rcu_read_lock().
*/
int genlmsg_multicast_allns(const struct genl_family *family,
struct sk_buff *skb, u32 portid,
- unsigned int group, gfp_t flags);
+ unsigned int group);
/**
* genlmsg_unicast - unicast a netlink message
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index 91762faecc13..02fbc036f34e 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -18,7 +18,7 @@
#define __RADIOTAP_H
#include <linux/kernel.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
/**
* struct ieee80211_radiotap_header - base radiotap header
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 954dff901b69..333e0fae6796 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -22,7 +22,7 @@
#include <net/cfg80211.h>
#include <net/codel.h>
#include <net/ieee80211_radiotap.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
/**
* DOC: Introduction
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index 1b5488fa2ff0..d72006a85f02 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -7,7 +7,7 @@
#ifndef NET_MAC802154_H
#define NET_MAC802154_H
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/af_ieee802154.h>
#include <linux/ieee802154.h>
#include <linux/skbuff.h>
diff --git a/include/net/mctp.h b/include/net/mctp.h
index 7b17c52e8ce2..28d59ae94ca3 100644
--- a/include/net/mctp.h
+++ b/include/net/mctp.h
@@ -295,7 +295,7 @@ void mctp_neigh_remove_dev(struct mctp_dev *mdev);
int mctp_routes_init(void);
void mctp_routes_exit(void);
-void mctp_device_init(void);
+int mctp_device_init(void);
void mctp_device_exit(void);
#endif /* __NET_MCTP_H */
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 49708e7e1339..91ae20cb7648 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -2,7 +2,7 @@
#ifndef _NET_NF_TABLES_H
#define _NET_NF_TABLES_H
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/list.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index b45d57b5968a..2d3eb7cb4dff 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -29,6 +29,15 @@ static inline enum rtnl_kinds rtnl_msgtype_kind(int msgtype)
return msgtype & RTNL_KIND_MASK;
}
+struct rtnl_msg_handler {
+ struct module *owner;
+ int protocol;
+ int msgtype;
+ rtnl_doit_func doit;
+ rtnl_dumpit_func dumpit;
+ int flags;
+};
+
void rtnl_register(int protocol, int msgtype,
rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
int rtnl_register_module(struct module *owner, int protocol, int msgtype,
@@ -36,6 +45,14 @@ int rtnl_register_module(struct module *owner, int protocol, int msgtype,
int rtnl_unregister(int protocol, int msgtype);
void rtnl_unregister_all(int protocol);
+int __rtnl_register_many(const struct rtnl_msg_handler *handlers, int n);
+void __rtnl_unregister_many(const struct rtnl_msg_handler *handlers, int n);
+
+#define rtnl_register_many(handlers) \
+ __rtnl_register_many(handlers, ARRAY_SIZE(handlers))
+#define rtnl_unregister_many(handlers) \
+ __rtnl_unregister_many(handlers, ARRAY_SIZE(handlers))
+
static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
{
if (nlmsg_len(nlh) >= sizeof(struct rtgenmsg))
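A hedged sketch of how a protocol module would use the array form; the protocol, message type and handler below are placeholders, not a real registration.

#include <linux/module.h>
#include <net/rtnetlink.h>

static int sample_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct netlink_ext_ack *extack)
{
	return 0;	/* placeholder handler */
}

static const struct rtnl_msg_handler sample_handlers[] = {
	{ .owner = THIS_MODULE, .protocol = PF_UNSPEC,	/* placeholder values */
	  .msgtype = RTM_NEWLINK, .doit = sample_doit },
};

static int __init sample_init(void)
{
	/* registers every entry in one call */
	return rtnl_register_many(sample_handlers);
}
module_init(sample_init);

static void __exit sample_exit(void)
{
	rtnl_unregister_many(sample_handlers);
}
module_exit(sample_exit);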
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 79edd5b5e3c9..5d74fa7e694c 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -848,7 +848,6 @@ static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
- qdisc_calculate_pkt_len(skb, sch);
return sch->enqueue(skb, sch, to_free);
}
diff --git a/include/net/sock.h b/include/net/sock.h
index c58ca8dd561b..db29c39e19a7 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -894,6 +894,8 @@ static inline void sk_add_bind_node(struct sock *sk,
hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
#define sk_for_each_bound(__sk, list) \
hlist_for_each_entry(__sk, list, sk_bind_node)
+#define sk_for_each_bound_safe(__sk, tmp, list) \
+ hlist_for_each_entry_safe(__sk, tmp, list, sk_bind_node)
/**
* sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset
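As with the other _safe list helpers, the new macro permits unlinking the current entry during the walk; a small hypothetical teardown sketch:

#include <net/sock.h>

/* Unlink every socket bound to @list (hypothetical teardown path). */
static void sample_purge_bound(struct hlist_head *list)
{
	struct sock *sk;
	struct hlist_node *tmp;

	sk_for_each_bound_safe(sk, tmp, list)
		__sk_del_bind_node(sk);	/* @tmp keeps the iteration valid */
}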
diff --git a/include/net/tcp.h b/include/net/tcp.h
index f77f812bfbe7..d1948d357dad 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -2435,9 +2435,26 @@ static inline s64 tcp_rto_delta_us(const struct sock *sk)
{
const struct sk_buff *skb = tcp_rtx_queue_head(sk);
u32 rto = inet_csk(sk)->icsk_rto;
- u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
- return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
+ if (likely(skb)) {
+ u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
+
+ return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
+ } else {
+ WARN_ONCE(1,
+ "rtx queue emtpy: "
+ "out:%u sacked:%u lost:%u retrans:%u "
+ "tlp_high_seq:%u sk_state:%u ca_state:%u "
+ "advmss:%u mss_cache:%u pmtu:%u\n",
+ tcp_sk(sk)->packets_out, tcp_sk(sk)->sacked_out,
+ tcp_sk(sk)->lost_out, tcp_sk(sk)->retrans_out,
+ tcp_sk(sk)->tlp_high_seq, sk->sk_state,
+ inet_csk(sk)->icsk_ca_state,
+ tcp_sk(sk)->advmss, tcp_sk(sk)->mss_cache,
+ inet_csk(sk)->icsk_pmtu_cookie);
+ return jiffies_to_usecs(rto);
+ }
+
}
/*
diff --git a/include/rdma/ib_hdrs.h b/include/rdma/ib_hdrs.h
index 8ae07c0ecdf7..1c4c1a69937a 100644
--- a/include/rdma/ib_hdrs.h
+++ b/include/rdma/ib_hdrs.h
@@ -7,7 +7,7 @@
#define IB_HDRS_H
#include <linux/types.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <rdma/ib_verbs.h>
#define IB_SEQ_NAK (3 << 29)
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 565a85044541..7dc7b1cc71b5 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -38,6 +38,7 @@ struct ib_umem_dmabuf {
unsigned long last_sg_trim;
void *private;
u8 pinned : 1;
+ u8 revoked : 1;
};
static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
@@ -150,9 +151,15 @@ struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
unsigned long offset,
size_t size, int fd,
int access);
+struct ib_umem_dmabuf *
+ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
+ struct device *dma_device,
+ unsigned long offset, size_t size,
+ int fd, int access);
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
+void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf);
#else /* CONFIG_INFINIBAND_USER_MEM */
@@ -196,12 +203,23 @@ ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
{
return ERR_PTR(-EOPNOTSUPP);
}
+
+static inline struct ib_umem_dmabuf *
+ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
+ struct device *dma_device,
+ unsigned long offset, size_t size,
+ int fd, int access)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
return -EOPNOTSUPP;
}
static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }
+static inline void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf) {}
#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 6c5712ae559d..aa8ede439905 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2476,7 +2476,7 @@ struct ib_device_ops {
struct ib_mr *(*reg_user_mr_dmabuf)(struct ib_pd *pd, u64 offset,
u64 length, u64 virt_addr, int fd,
int mr_access_flags,
- struct ib_udata *udata);
+ struct uverbs_attr_bundle *attrs);
struct ib_mr *(*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start,
u64 length, u64 virt_addr,
int mr_access_flags, struct ib_pd *pd,
@@ -4453,6 +4453,8 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port,
const struct sockaddr *addr);
int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
unsigned int port);
+struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
+ u32 port);
struct ib_wq *ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr);
int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
diff --git a/include/rdma/iba.h b/include/rdma/iba.h
index 6a1115b02a0d..dcae154edc26 100644
--- a/include/rdma/iba.h
+++ b/include/rdma/iba.h
@@ -7,7 +7,7 @@
#include <linux/kernel.h>
#include <linux/bitfield.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
static inline u32 _iba_get8(const u8 *ptr)
{
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
index c2a79aeee113..326deaf56d5d 100644
--- a/include/rdma/rdma_netlink.h
+++ b/include/rdma/rdma_netlink.h
@@ -6,6 +6,8 @@
#include <linux/netlink.h>
#include <uapi/rdma/rdma_netlink.h>
+struct ib_device;
+
enum {
RDMA_NLDEV_ATTR_EMPTY_STRING = 1,
RDMA_NLDEV_ATTR_ENTRY_STRLEN = 16,
@@ -110,6 +112,16 @@ int rdma_nl_multicast(struct net *net, struct sk_buff *skb,
*/
bool rdma_nl_chk_listeners(unsigned int group);
+/**
+ * rdma_nl_notify_event - Prepare and send an event message
+ * @ib: the IB device which triggered the event
+ * @port_num: the port number which triggered the event - 0 if unused
+ * @type: the event type
+ * Return: 0 on success or a negative error code
+ */
+int rdma_nl_notify_event(struct ib_device *ib, u32 port_num,
+ enum rdma_nl_notify_event_type type);
+
struct rdma_link_ops {
struct list_head list;
const char *type;
diff --git a/include/scsi/fcoe_sysfs.h b/include/scsi/fcoe_sysfs.h
index 4b1216de3f22..2b28a05e492b 100644
--- a/include/scsi/fcoe_sysfs.h
+++ b/include/scsi/fcoe_sysfs.h
@@ -50,9 +50,7 @@ struct fcoe_ctlr_device {
struct fcoe_sysfs_function_template *f;
struct list_head fcfs;
- char work_q_name[20];
struct workqueue_struct *work_q;
- char devloss_work_q_name[20];
struct workqueue_struct *devloss_work_q;
struct mutex lock;
diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h
index 7b196d234626..bd29cdb513a5 100644
--- a/include/scsi/scsi_dbg.h
+++ b/include/scsi/scsi_dbg.h
@@ -24,7 +24,6 @@ extern const char *scsi_extd_sense_format(unsigned char, unsigned char,
const char **);
extern const char *scsi_mlreturn_string(int);
extern const char *scsi_hostbyte_string(int);
-extern const char *scsi_driverbyte_string(int);
#else
static inline bool
scsi_opcode_sa_name(int cmd, int sa,
@@ -76,12 +75,6 @@ scsi_hostbyte_string(int result)
return NULL;
}
-static inline const char *
-scsi_driverbyte_string(int result)
-{
- return NULL;
-}
-
#endif
#endif /* _SCSI_SCSI_DBG_H */
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 19a1c5c48935..2b4ab0369ffb 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -677,7 +677,6 @@ struct Scsi_Host {
/*
* Optional work queue to be utilized by the transport
*/
- char work_q_name[20];
struct workqueue_struct *work_q;
/*
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 4b884b8013e0..d02b55261307 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -12,7 +12,7 @@
#include <linux/sched.h>
#include <linux/bsg-lib.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_netlink.h>
#include <scsi/scsi_host.h>
@@ -575,9 +575,7 @@ struct fc_host_attrs {
u16 npiv_vports_inuse;
/* work queues for rport state manipulation */
- char work_q_name[20];
struct workqueue_struct *work_q;
- char devloss_work_q_name[20];
struct workqueue_struct *devloss_work_q;
/* bsg support */
@@ -654,12 +652,8 @@ struct fc_host_attrs {
(((struct fc_host_attrs *)(x)->shost_data)->next_vport_number)
#define fc_host_npiv_vports_inuse(x) \
(((struct fc_host_attrs *)(x)->shost_data)->npiv_vports_inuse)
-#define fc_host_work_q_name(x) \
- (((struct fc_host_attrs *)(x)->shost_data)->work_q_name)
#define fc_host_work_q(x) \
(((struct fc_host_attrs *)(x)->shost_data)->work_q)
-#define fc_host_devloss_work_q_name(x) \
- (((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q_name)
#define fc_host_devloss_work_q(x) \
(((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q)
#define fc_host_dev_loss_tmo(x) \
diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h
index af793f2a0ec4..8f967d15e479 100644
--- a/include/soc/fsl/qe/qe.h
+++ b/include/soc/fsl/qe/qe.h
@@ -23,6 +23,8 @@
#include <linux/of_address.h>
#include <linux/types.h>
+struct device;
+
#define QE_NUM_OF_SNUM 256 /* There are 256 serial number in QE */
#define QE_NUM_OF_BRGS 16
#define QE_NUM_OF_PORTS 1024
@@ -93,8 +95,12 @@ int cpm_muram_init(void);
#if defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE)
s32 cpm_muram_alloc(unsigned long size, unsigned long align);
+s32 devm_cpm_muram_alloc(struct device *dev, unsigned long size,
+ unsigned long align);
void cpm_muram_free(s32 offset);
s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size);
+s32 devm_cpm_muram_alloc_fixed(struct device *dev, unsigned long offset,
+ unsigned long size);
void __iomem *cpm_muram_addr(unsigned long offset);
unsigned long cpm_muram_offset(const void __iomem *addr);
dma_addr_t cpm_muram_dma(void __iomem *addr);
@@ -106,6 +112,12 @@ static inline s32 cpm_muram_alloc(unsigned long size,
return -ENOSYS;
}
+static inline s32 devm_cpm_muram_alloc(struct device *dev, unsigned long size,
+ unsigned long align)
+{
+ return -ENOSYS;
+}
+
static inline void cpm_muram_free(s32 offset)
{
}
@@ -116,6 +128,13 @@ static inline s32 cpm_muram_alloc_fixed(unsigned long offset,
return -ENOSYS;
}
+static inline s32 devm_cpm_muram_alloc_fixed(struct device *dev,
+ unsigned long offset,
+ unsigned long size)
+{
+ return -ENOSYS;
+}
+
static inline void __iomem *cpm_muram_addr(unsigned long offset)
{
return NULL;
@@ -172,7 +191,6 @@ static inline int par_io_data_set(u8 port, u8 pin, u8 val) { return -ENOSYS; }
/*
* Pin multiplexing functions.
*/
-struct device;
struct qe_pin;
#ifdef CONFIG_QE_GPIO
extern struct qe_pin *qe_pin_request(struct device *dev, int index);
@@ -233,7 +251,9 @@ static inline int qe_alive_during_sleep(void)
/* we actually use cpm_muram implementation, define this for convenience */
#define qe_muram_init cpm_muram_init
#define qe_muram_alloc cpm_muram_alloc
+#define devm_qe_muram_alloc devm_cpm_muram_alloc
#define qe_muram_alloc_fixed cpm_muram_alloc_fixed
+#define devm_qe_muram_alloc_fixed devm_cpm_muram_alloc_fixed
#define qe_muram_free cpm_muram_free
#define qe_muram_addr cpm_muram_addr
#define qe_muram_offset cpm_muram_offset
@@ -449,6 +469,7 @@ enum comm_dir {
#define QE_QMC_STOP_TX 0x0000000c
#define QE_QMC_STOP_RX 0x0000000d
#define QE_SS7_SU_FIL_RESET 0x0000000e
+#define QE_PUSHSCHED 0x0000000f
/* jonathbr added from here down for 83xx */
#define QE_RESET_BCS 0x0000000a
#define QE_MCC_INIT_TX_RX_16 0x00000003
diff --git a/include/sound/aci.h b/include/sound/aci.h
index 6ebbd4223f12..36a761c9820d 100644
--- a/include/sound/aci.h
+++ b/include/sound/aci.h
@@ -72,6 +72,7 @@
#define ACI_SET_EQ7 0x46 /* ... to Treble */
struct snd_miro_aci {
+ struct snd_card *card;
unsigned long aci_port;
int aci_vendor;
int aci_product;
diff --git a/include/sound/asoundef.h b/include/sound/asoundef.h
index 9fdeac19dadb..09b2c3dffb30 100644
--- a/include/sound/asoundef.h
+++ b/include/sound/asoundef.h
@@ -110,18 +110,22 @@
#define IEC958_AES2_CON_SOURCE_UNSPEC (0<<0) /* unspecified */
#define IEC958_AES2_CON_CHANNEL (15<<4) /* mask - channel number */
#define IEC958_AES2_CON_CHANNEL_UNSPEC (0<<4) /* unspecified */
-#define IEC958_AES3_CON_FS (15<<0) /* mask - sample frequency */
+#define IEC958_AES3_CON_FS ((1<<7) | (15<<0)) /* mask - sample frequency */
#define IEC958_AES3_CON_FS_44100 (0<<0) /* 44.1kHz */
#define IEC958_AES3_CON_FS_NOTID (1<<0) /* non indicated */
#define IEC958_AES3_CON_FS_48000 (2<<0) /* 48kHz */
#define IEC958_AES3_CON_FS_32000 (3<<0) /* 32kHz */
#define IEC958_AES3_CON_FS_22050 (4<<0) /* 22.05kHz */
+#define IEC958_AES3_CON_FS_384000 (5<<0) /* 384kHz */
#define IEC958_AES3_CON_FS_24000 (6<<0) /* 24kHz */
#define IEC958_AES3_CON_FS_88200 (8<<0) /* 88.2kHz */
#define IEC958_AES3_CON_FS_768000 (9<<0) /* 768kHz */
#define IEC958_AES3_CON_FS_96000 (10<<0) /* 96kHz */
#define IEC958_AES3_CON_FS_176400 (12<<0) /* 176.4kHz */
+#define IEC958_AES3_CON_FS_352400 (13<<0) /* 352.4kHz */
#define IEC958_AES3_CON_FS_192000 (14<<0) /* 192kHz */
+#define IEC958_AES3_CON_FS_128000 ((1<<7) | (11<<0)) /* 128kHz */
+#define IEC958_AES3_CON_FS_705600 ((1<<7) | (13<<0)) /* 705.6kHz */
#define IEC958_AES3_CON_CLOCK (3<<4) /* mask - clock accuracy */
#define IEC958_AES3_CON_CLOCK_1000PPM (0<<4) /* 1000 ppm */
#define IEC958_AES3_CON_CLOCK_50PPM (1<<4) /* 50 ppm */
diff --git a/include/sound/control.h b/include/sound/control.h
index c1659036c4a7..e07f6b960641 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -81,7 +81,7 @@ struct snd_kcontrol {
unsigned long private_value;
void *private_data;
void (*private_free)(struct snd_kcontrol *kcontrol);
- struct snd_kcontrol_volatile vd[]; /* volatile data */
+ struct snd_kcontrol_volatile vd[] __counted_by(count); /* volatile data */
};
#define snd_kcontrol(n) list_entry(n, struct snd_kcontrol, list)
@@ -140,9 +140,7 @@ int snd_ctl_remove_id(struct snd_card * card, struct snd_ctl_elem_id *id);
int snd_ctl_rename_id(struct snd_card * card, struct snd_ctl_elem_id *src_id, struct snd_ctl_elem_id *dst_id);
void snd_ctl_rename(struct snd_card *card, struct snd_kcontrol *kctl, const char *name);
int snd_ctl_activate_id(struct snd_card *card, struct snd_ctl_elem_id *id, int active);
-struct snd_kcontrol *snd_ctl_find_numid_locked(struct snd_card *card, unsigned int numid);
struct snd_kcontrol *snd_ctl_find_numid(struct snd_card *card, unsigned int numid);
-struct snd_kcontrol *snd_ctl_find_id_locked(struct snd_card *card, const struct snd_ctl_elem_id *id);
struct snd_kcontrol *snd_ctl_find_id(struct snd_card *card, const struct snd_ctl_elem_id *id);
/**
@@ -167,29 +165,6 @@ snd_ctl_find_id_mixer(struct snd_card *card, const char *name)
return snd_ctl_find_id(card, &id);
}
-/**
- * snd_ctl_find_id_mixer_locked - find the control instance with the given name string
- * @card: the card instance
- * @name: the name string
- *
- * Finds the control instance with the given name and
- * @SNDRV_CTL_ELEM_IFACE_MIXER. Other fields are set to zero.
- *
- * This is merely a wrapper to snd_ctl_find_id_locked().
- * The caller must down card->controls_rwsem before calling this function.
- *
- * Return: The pointer of the instance if found, or %NULL if not.
- */
-static inline struct snd_kcontrol *
-snd_ctl_find_id_mixer_locked(struct snd_card *card, const char *name)
-{
- struct snd_ctl_elem_id id = {};
-
- id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
- strscpy(id.name, name, sizeof(id.name));
- return snd_ctl_find_id_locked(card, &id);
-}
-
int snd_ctl_create(struct snd_card *card);
int snd_ctl_register_ioctl(snd_kctl_ioctl_func_t fcn);
diff --git a/include/sound/core.h b/include/sound/core.h
index dfef0c9d4b9f..1f3f5dccd736 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -99,7 +99,7 @@ struct snd_card {
struct device *ctl_dev; /* control device */
unsigned int last_numid; /* last used numeric ID */
struct rw_semaphore controls_rwsem; /* controls lock (list and values) */
- rwlock_t ctl_files_rwlock; /* ctl_files list lock */
+ rwlock_t controls_rwlock; /* lock for lookup and ctl_files list */
int controls_count; /* count of all controls */
size_t user_ctl_alloc_size; // current memory allocation by user controls.
struct list_head controls; /* all controls for this card */
@@ -345,46 +345,8 @@ void release_and_free_resource(struct resource *res);
/* --- */
-/* sound printk debug levels */
-enum {
- SND_PR_ALWAYS,
- SND_PR_DEBUG,
- SND_PR_VERBOSE,
-};
-
-#if defined(CONFIG_SND_DEBUG) || defined(CONFIG_SND_VERBOSE_PRINTK)
-__printf(4, 5)
-void __snd_printk(unsigned int level, const char *file, int line,
- const char *format, ...);
-#else
-#define __snd_printk(level, file, line, format, ...) \
- printk(format, ##__VA_ARGS__)
-#endif
-
-/**
- * snd_printk - printk wrapper
- * @fmt: format string
- *
- * Works like printk() but prints the file and the line of the caller
- * when configured with CONFIG_SND_VERBOSE_PRINTK.
- */
-#define snd_printk(fmt, ...) \
- __snd_printk(0, __FILE__, __LINE__, fmt, ##__VA_ARGS__)
-
#ifdef CONFIG_SND_DEBUG
/**
- * snd_printd - debug printk
- * @fmt: format string
- *
- * Works like snd_printk() for debugging purposes.
- * Ignored when CONFIG_SND_DEBUG is not set.
- */
-#define snd_printd(fmt, ...) \
- __snd_printk(1, __FILE__, __LINE__, fmt, ##__VA_ARGS__)
-#define _snd_printd(level, fmt, ...) \
- __snd_printk(level, __FILE__, __LINE__, fmt, ##__VA_ARGS__)
-
-/**
* snd_BUG - give a BUG warning message and stack trace
*
* Calls WARN() if CONFIG_SND_DEBUG is set.
@@ -393,12 +355,6 @@ void __snd_printk(unsigned int level, const char *file, int line,
#define snd_BUG() WARN(1, "BUG?\n")
/**
- * snd_printd_ratelimit - Suppress high rates of output when
- * CONFIG_SND_DEBUG is enabled.
- */
-#define snd_printd_ratelimit() printk_ratelimit()
-
-/**
* snd_BUG_ON - debugging check macro
* @cond: condition to evaluate
*
@@ -409,11 +365,6 @@ void __snd_printk(unsigned int level, const char *file, int line,
#else /* !CONFIG_SND_DEBUG */
-__printf(1, 2)
-static inline void snd_printd(const char *format, ...) {}
-__printf(2, 3)
-static inline void _snd_printd(int level, const char *format, ...) {}
-
#define snd_BUG() do { } while (0)
#define snd_BUG_ON(condition) ({ \
@@ -421,26 +372,8 @@ static inline void _snd_printd(int level, const char *format, ...) {}
unlikely(__ret_warn_on); \
})
-static inline bool snd_printd_ratelimit(void) { return false; }
-
#endif /* CONFIG_SND_DEBUG */
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-/**
- * snd_printdd - debug printk
- * @format: format string
- *
- * Works like snd_printk() for debugging purposes.
- * Ignored when CONFIG_SND_DEBUG_VERBOSE is not set.
- */
-#define snd_printdd(format, ...) \
- __snd_printk(2, __FILE__, __LINE__, format, ##__VA_ARGS__)
-#else
-__printf(1, 2)
-static inline void snd_printdd(const char *format, ...) {}
-#endif
-
-
#define SNDRV_OSS_VERSION ((3<<16)|(8<<8)|(1<<4)|(0)) /* 3.8.1a */
/* for easier backward-porting */
diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h
index a51acefa785f..94e8185c4795 100644
--- a/include/sound/cs35l56.h
+++ b/include/sound/cs35l56.h
@@ -282,9 +282,9 @@ static inline bool cs35l56_is_otp_register(unsigned int reg)
return (reg >> 16) == 3;
}
-extern struct regmap_config cs35l56_regmap_i2c;
-extern struct regmap_config cs35l56_regmap_spi;
-extern struct regmap_config cs35l56_regmap_sdw;
+extern const struct regmap_config cs35l56_regmap_i2c;
+extern const struct regmap_config cs35l56_regmap_spi;
+extern const struct regmap_config cs35l56_regmap_sdw;
extern const struct cirrus_amp_cal_controls cs35l56_calibration_controls;
diff --git a/include/sound/es1688.h b/include/sound/es1688.h
index 099569c31fbb..425a3717d77a 100644
--- a/include/sound/es1688.h
+++ b/include/sound/es1688.h
@@ -17,6 +17,7 @@
#define ES1688_HW_UNDEF 0x0003
struct snd_es1688 {
+ struct snd_card *card;
unsigned long port; /* port of ESS chip */
struct resource *res_port;
unsigned long mpu_port; /* MPU-401 port of ESS chip */
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 7e39d486374a..b098ceadbe74 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -590,7 +590,7 @@ void snd_hdac_stream_sync_trigger(struct hdac_stream *azx_dev, bool set,
void snd_hdac_stream_sync(struct hdac_stream *azx_dev, bool start,
unsigned int streams);
void snd_hdac_stream_timecounter_init(struct hdac_stream *azx_dev,
- unsigned int streams);
+ unsigned int streams, bool start);
int snd_hdac_get_stream_stripe_ctl(struct hdac_bus *bus,
struct snd_pcm_substream *substream);
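Callers of snd_hdac_stream_timecounter_init() gain a third argument indicating whether the linked streams are actually being started. A hedged caller sketch (the wrapper is illustrative, not an actual call site):

#include <sound/hdaudio.h>

static void example_trigger(struct hdac_stream *azx_dev, unsigned int streams,
			    bool start)
{
	snd_hdac_stream_timecounter_init(azx_dev, streams, start);
}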
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 43d524580bd2..9dd475cf4e8c 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -42,17 +42,12 @@ struct snd_dma_device {
#define SNDRV_DMA_TYPE_NONCONTIG 8 /* non-coherent SG buffer */
#define SNDRV_DMA_TYPE_NONCOHERENT 9 /* non-coherent buffer */
#ifdef CONFIG_SND_DMA_SGBUF
-#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_NONCONTIG
+#define SNDRV_DMA_TYPE_DEV_SG 3 /* S/G pages */
#define SNDRV_DMA_TYPE_DEV_WC_SG 6 /* SG write-combined */
#else
#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */
#define SNDRV_DMA_TYPE_DEV_WC_SG SNDRV_DMA_TYPE_DEV_WC
#endif
-/* fallback types, don't use those directly */
-#ifdef CONFIG_SND_DMA_SGBUF
-#define SNDRV_DMA_TYPE_DEV_SG_FALLBACK 10
-#define SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK 11
-#endif
/*
* info for buffer allocation
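Drivers keep requesting the buffer type symbolically; whether SNDRV_DMA_TYPE_DEV_SG resolves to a real S/G buffer or degrades to SNDRV_DMA_TYPE_DEV still depends on CONFIG_SND_DMA_SGBUF. A sketch of the usual preallocation call (sizes are illustrative):

#include <sound/pcm.h>

static int example_preallocate(struct snd_pcm *pcm, struct device *dev)
{
	return snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV_SG, dev,
					      64 * 1024, 1024 * 1024);
}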
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index ac8f3aef9205..0bf7d25434d7 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -123,6 +123,10 @@ struct snd_pcm_ops {
#define SNDRV_PCM_RATE_384000 (1U<<14) /* 384000Hz */
#define SNDRV_PCM_RATE_705600 (1U<<15) /* 705600Hz */
#define SNDRV_PCM_RATE_768000 (1U<<16) /* 768000Hz */
+/* extended rates since 6.12 */
+#define SNDRV_PCM_RATE_12000 (1U<<17) /* 12000Hz */
+#define SNDRV_PCM_RATE_24000 (1U<<18) /* 24000Hz */
+#define SNDRV_PCM_RATE_128000 (1U<<19) /* 128000Hz */
#define SNDRV_PCM_RATE_CONTINUOUS (1U<<30) /* continuous range */
#define SNDRV_PCM_RATE_KNOT (1U<<31) /* supports more non-continuous rates */
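A driver advertising the new rates simply ORs the extra bits into its hardware descriptor; a sketch showing only the rate-related fields (values are illustrative):

#include <sound/pcm.h>

static const struct snd_pcm_hardware example_hw = {
	.rates		= SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
			  SNDRV_PCM_RATE_12000 | SNDRV_PCM_RATE_24000 |
			  SNDRV_PCM_RATE_128000,
	.rate_min	= 12000,
	.rate_max	= 128000,
};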
@@ -498,6 +502,9 @@ struct snd_pcm_substream {
/* misc flags */
unsigned int hw_opened: 1;
unsigned int managed_buffer_alloc:1;
+#ifdef CONFIG_SND_PCM_XRUN_DEBUG
+ unsigned int xrun_counter; /* number of times xrun happens */
+#endif /* CONFIG_SND_PCM_XRUN_DEBUG */
};
#define SUBSTREAM_BUSY(substream) ((substream)->ref_count > 0)
@@ -1355,48 +1362,6 @@ snd_pcm_set_fixed_buffer_all(struct snd_pcm *pcm, int type,
return snd_pcm_set_managed_buffer_all(pcm, type, data, size, 0);
}
-int _snd_pcm_lib_alloc_vmalloc_buffer(struct snd_pcm_substream *substream,
- size_t size, gfp_t gfp_flags);
-int snd_pcm_lib_free_vmalloc_buffer(struct snd_pcm_substream *substream);
-struct page *snd_pcm_lib_get_vmalloc_page(struct snd_pcm_substream *substream,
- unsigned long offset);
-/**
- * snd_pcm_lib_alloc_vmalloc_buffer - allocate virtual DMA buffer
- * @substream: the substream to allocate the buffer to
- * @size: the requested buffer size, in bytes
- *
- * Allocates the PCM substream buffer using vmalloc(), i.e., the memory is
- * contiguous in kernel virtual space, but not in physical memory. Use this
- * if the buffer is accessed by kernel code but not by device DMA.
- *
- * Return: 1 if the buffer was changed, 0 if not changed, or a negative error
- * code.
- */
-static inline int snd_pcm_lib_alloc_vmalloc_buffer
- (struct snd_pcm_substream *substream, size_t size)
-{
- return _snd_pcm_lib_alloc_vmalloc_buffer(substream, size,
- GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
-}
-
-/**
- * snd_pcm_lib_alloc_vmalloc_32_buffer - allocate 32-bit-addressable buffer
- * @substream: the substream to allocate the buffer to
- * @size: the requested buffer size, in bytes
- *
- * This function works like snd_pcm_lib_alloc_vmalloc_buffer(), but uses
- * vmalloc_32(), i.e., the pages are allocated from 32-bit-addressable memory.
- *
- * Return: 1 if the buffer was changed, 0 if not changed, or a negative error
- * code.
- */
-static inline int snd_pcm_lib_alloc_vmalloc_32_buffer
- (struct snd_pcm_substream *substream, size_t size)
-{
- return _snd_pcm_lib_alloc_vmalloc_buffer(substream, size,
- GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
-}
-
#define snd_pcm_get_dma_buf(substream) ((substream)->runtime->dma_buffer_p)
/**
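With the inline vmalloc helpers gone, the same behaviour is available through the managed-buffer API using SNDRV_DMA_TYPE_VMALLOC. A hypothetical replacement sketch (no preallocation; the buffer is allocated at hw_params time):

#include <sound/pcm.h>

static int example_pcm_new(struct snd_pcm *pcm)
{
	return snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
					      NULL, 0, 0);
}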
diff --git a/include/sound/seq_kernel.h b/include/sound/seq_kernel.h
index c8621671fa70..00c32eed2124 100644
--- a/include/sound/seq_kernel.h
+++ b/include/sound/seq_kernel.h
@@ -86,10 +86,6 @@ static inline size_t snd_seq_event_packet_size(struct snd_seq_event *ev)
/* interface for OSS emulation */
int snd_seq_set_queue_tempo(int client, struct snd_seq_queue_tempo *tempo);
-/* port callback routines */
-void snd_port_init_callback(struct snd_seq_port_callback *p);
-struct snd_seq_port_callback *snd_port_alloc_callback(void);
-
/* port attach/detach */
int snd_seq_event_port_attach(int client, struct snd_seq_port_callback *pcbp,
int cap, int type, int midi_channels, int midi_voices, char *portname);
diff --git a/include/sound/snd_wavefront.h b/include/sound/snd_wavefront.h
index 55053557c898..27f7e8a477c2 100644
--- a/include/sound/snd_wavefront.h
+++ b/include/sound/snd_wavefront.h
@@ -137,8 +137,4 @@ extern int snd_wavefront_fx_ioctl (struct snd_hwdep *,
extern int snd_wavefront_fx_open (struct snd_hwdep *, struct file *);
extern int snd_wavefront_fx_release (struct snd_hwdep *, struct file *);
-/* prefix in all snd_printk() delivered messages */
-
-#define LOGNAME "WaveFront: "
-
#endif /* __SOUND_SND_WAVEFRONT_H__ */
diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h
index 4843b57798f6..daed7123df9d 100644
--- a/include/sound/soc-acpi-intel-match.h
+++ b/include/sound/soc-acpi-intel-match.h
@@ -33,6 +33,7 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_rpl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_lnl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_ptl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cfl_sdw_machines[];
@@ -44,6 +45,7 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_rpl_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_lnl_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_sdw_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_ptl_sdw_machines[];
/*
* generic table used for HDA codec-based platforms, possibly with
diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
index 38ccec4e3fcd..60d3b86a4660 100644
--- a/include/sound/soc-acpi.h
+++ b/include/sound/soc-acpi.h
@@ -62,7 +62,6 @@ static inline struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg)
* @platform: string used for HDAudio codec support
* @codec_mask: used for HDAudio support
* @dmic_num: number of SoC- or chipset-attached PDM digital microphones
- * @common_hdmi_codec_drv: use commom HDAudio HDMI codec driver
* @link_mask: SoundWire links enabled on the board
* @links: array of SoundWire link _ADR descriptors, null terminated
* @i2s_link_mask: I2S/TDM links enabled on the board
@@ -70,15 +69,16 @@ static inline struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg)
* @dai_drivers: pointer to dai_drivers, used e.g. in nocodec mode
* @subsystem_vendor: optional PCI SSID vendor value
* @subsystem_device: optional PCI SSID device value
+ * @subsystem_rev: optional PCI SSID revision value
* @subsystem_id_set: true if a value has been written to
* subsystem_vendor and subsystem_device.
+ * @bt_link_mask: BT offload link enabled on the board
*/
struct snd_soc_acpi_mach_params {
u32 acpi_ipc_irq_index;
const char *platform;
u32 codec_mask;
u32 dmic_num;
- bool common_hdmi_codec_drv;
u32 link_mask;
const struct snd_soc_acpi_link_adr *links;
u32 i2s_link_mask;
@@ -86,7 +86,9 @@ struct snd_soc_acpi_mach_params {
struct snd_soc_dai_driver *dai_drivers;
unsigned short subsystem_vendor;
unsigned short subsystem_device;
+ unsigned short subsystem_rev;
bool subsystem_id_set;
+ u32 bt_link_mask;
};
/**
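A platform driver that builds the machine parameters would fill the new fields roughly like this (sketch only; parameter values are made up):

#include <sound/soc-acpi.h>

static void example_fill_mach_params(struct snd_soc_acpi_mach_params *params,
				     u32 bt_link_mask, unsigned short ssid_rev)
{
	params->bt_link_mask = bt_link_mask;
	params->subsystem_rev = ssid_rev;
	params->subsystem_id_set = true;
}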
diff --git a/include/sound/soc-card.h b/include/sound/soc-card.h
index 1f4c39922d82..ecc02e955279 100644
--- a/include/sound/soc-card.h
+++ b/include/sound/soc-card.h
@@ -30,8 +30,6 @@ static inline void snd_soc_card_mutex_unlock(struct snd_soc_card *card)
struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card,
const char *name);
-struct snd_kcontrol *snd_soc_card_get_kcontrol_locked(struct snd_soc_card *soc_card,
- const char *name);
int snd_soc_card_jack_new(struct snd_soc_card *card, const char *id, int type,
struct snd_soc_jack *jack);
int snd_soc_card_jack_new_pins(struct snd_soc_card *card, const char *id,
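With the _locked variant removed, lookups go through the single remaining helper; a sketch with a hypothetical control name:

#include <sound/soc.h>
#include <sound/soc-card.h>

static struct snd_kcontrol *example_find_ctl(struct snd_soc_card *card)
{
	return snd_soc_card_get_kcontrol(card, "Example Switch");
}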
diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h
index bf2e381cd124..61534ac0edd1 100644
--- a/include/sound/soc-component.h
+++ b/include/sound/soc-component.h
@@ -464,9 +464,6 @@ int snd_soc_component_force_enable_pin_unlocked(
/* component controls */
struct snd_kcontrol *snd_soc_component_get_kcontrol(struct snd_soc_component *component,
const char * const ctl);
-struct snd_kcontrol *
-snd_soc_component_get_kcontrol_locked(struct snd_soc_component *component,
- const char * const ctl);
int snd_soc_component_notify_control(struct snd_soc_component *component,
const char * const ctl);
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index bbb72ad4c951..0d1b215f24f4 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -219,7 +219,6 @@ void snd_soc_dai_resume(struct snd_soc_dai *dai);
int snd_soc_dai_compress_new(struct snd_soc_dai *dai,
struct snd_soc_pcm_runtime *rtd, int num);
bool snd_soc_dai_stream_valid(const struct snd_soc_dai *dai, int stream);
-void snd_soc_dai_link_set_capabilities(struct snd_soc_dai_link *dai_link);
void snd_soc_dai_action(struct snd_soc_dai *dai,
int stream, int action);
static inline void snd_soc_dai_activate(struct snd_soc_dai *dai,
@@ -240,8 +239,6 @@ int snd_soc_pcm_dai_new(struct snd_soc_pcm_runtime *rtd);
int snd_soc_pcm_dai_prepare(struct snd_pcm_substream *substream);
int snd_soc_pcm_dai_trigger(struct snd_pcm_substream *substream, int cmd,
int rollback);
-int snd_soc_pcm_dai_bespoke_trigger(struct snd_pcm_substream *substream,
- int cmd);
void snd_soc_pcm_dai_delay(struct snd_pcm_substream *substream,
snd_pcm_sframes_t *cpu_delay, snd_pcm_sframes_t *codec_delay);
@@ -345,8 +342,7 @@ struct snd_soc_dai_ops {
*/
int (*trigger)(struct snd_pcm_substream *, int,
struct snd_soc_dai *);
- int (*bespoke_trigger)(struct snd_pcm_substream *, int,
- struct snd_soc_dai *);
+
/*
* For hardware based FIFO caused delay reporting.
* Optional.
diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
index ebd24753dd00..c6fb350b4b06 100644
--- a/include/sound/soc-dpcm.h
+++ b/include/sound/soc-dpcm.h
@@ -58,7 +58,6 @@ enum snd_soc_dpcm_state {
enum snd_soc_dpcm_trigger {
SND_SOC_DPCM_TRIGGER_PRE = 0,
SND_SOC_DPCM_TRIGGER_POST,
- SND_SOC_DPCM_TRIGGER_BESPOKE,
};
/*
@@ -114,24 +113,6 @@ struct snd_soc_dpcm_runtime {
#define for_each_dpcm_be_rollback(fe, stream, _dpcm) \
list_for_each_entry_continue_reverse(_dpcm, &(fe)->dpcm[stream].be_clients, list_be)
-/* can this BE stop and free */
-int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe,
- struct snd_soc_pcm_runtime *be, int stream);
-
-/* can this BE perform a hw_params() */
-int snd_soc_dpcm_can_be_params(struct snd_soc_pcm_runtime *fe,
- struct snd_soc_pcm_runtime *be, int stream);
-
-/* can this BE perform prepare */
-int snd_soc_dpcm_can_be_prepared(struct snd_soc_pcm_runtime *fe,
- struct snd_soc_pcm_runtime *be, int stream);
-
-/* is the current PCM operation for this FE ? */
-int snd_soc_dpcm_fe_can_update(struct snd_soc_pcm_runtime *fe, int stream);
-
-/* is the current PCM operation for this BE ? */
-int snd_soc_dpcm_be_can_update(struct snd_soc_pcm_runtime *fe,
- struct snd_soc_pcm_runtime *be, int stream);
/* get the substream for this BE */
struct snd_pcm_substream *
diff --git a/include/sound/soc.h b/include/sound/soc.h
index a8e66bbf932b..e6e359c1a2ac 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -815,6 +815,7 @@ struct snd_soc_dai_link {
/* This DAI link can route to other DAI links at runtime (Frontend)*/
unsigned int dynamic:1;
+ /* REMOVE ME */
/* DPCM capture and Playback support */
unsigned int dpcm_capture:1;
unsigned int dpcm_playback:1;
@@ -1206,11 +1207,11 @@ struct snd_soc_pcm_runtime {
/* bit field */
unsigned int pop_wait:1;
unsigned int fe_compr:1; /* for Dynamic PCM */
+ unsigned int initialized:1;
- bool initialized;
-
+ /* CPU/Codec/Platform */
int num_components;
- struct snd_soc_component *components[]; /* CPU/Codec/Platform */
+ struct snd_soc_component *components[] __counted_by(num_components);
};
/* see soc_new_pcm_runtime() */
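__counted_by(num_components) lets fortified array-bounds checking know the length of the flexible array, so the counter must be assigned before the array is indexed. A minimal allocation sketch of that pattern (illustrative struct, not the real allocator):

#include <linux/overflow.h>
#include <linux/slab.h>

struct snd_soc_component;

struct example_rtd {
	int num_components;
	struct snd_soc_component *components[] __counted_by(num_components);
};

static struct example_rtd *example_alloc(int n)
{
	struct example_rtd *rtd = kzalloc(struct_size(rtd, components, n),
					  GFP_KERNEL);

	if (!rtd)
		return NULL;
	rtd->num_components = n;	/* set the counter before any indexing */
	return rtd;
}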
diff --git a/include/sound/soc_sdw_utils.h b/include/sound/soc_sdw_utils.h
new file mode 100644
index 000000000000..f68c1f193b3b
--- /dev/null
+++ b/include/sound/soc_sdw_utils.h
@@ -0,0 +1,247 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * This file incorporates work covered by the following copyright notice:
+ * Copyright (c) 2020 Intel Corporation
+ * Copyright(c) 2024 Advanced Micro Devices, Inc.
+ *
+ */
+
+#ifndef SOC_SDW_UTILS_H
+#define SOC_SDW_UTILS_H
+
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+
+#define SOC_SDW_MAX_DAI_NUM 8
+#define SOC_SDW_MAX_NO_PROPS 2
+#define SOC_SDW_JACK_JDSRC(quirk) ((quirk) & GENMASK(3, 0))
+
+/* If a CODEC has an optional speaker output, this quirk will enable it */
+#define SOC_SDW_CODEC_SPKR BIT(15)
+/*
+ * If the CODEC has additional devices attached directly to it.
+ *
+ * For the cs42l43:
+ * - 0 - No speaker output
+ * - SOC_SDW_CODEC_SPKR - CODEC internal speaker
+ * - SOC_SDW_SIDECAR_AMPS - 2x Sidecar amplifiers + CODEC internal speaker
+ * - SOC_SDW_CODEC_SPKR | SOC_SDW_SIDECAR_AMPS - Not currently supported
+ */
+#define SOC_SDW_SIDECAR_AMPS BIT(16)
+
+#define SOC_SDW_UNUSED_DAI_ID -1
+#define SOC_SDW_JACK_OUT_DAI_ID 0
+#define SOC_SDW_JACK_IN_DAI_ID 1
+#define SOC_SDW_AMP_OUT_DAI_ID 2
+#define SOC_SDW_AMP_IN_DAI_ID 3
+#define SOC_SDW_DMIC_DAI_ID 4
+
+#define SOC_SDW_DAI_TYPE_JACK 0
+#define SOC_SDW_DAI_TYPE_AMP 1
+#define SOC_SDW_DAI_TYPE_MIC 2
+
+struct asoc_sdw_codec_info;
+
+struct asoc_sdw_dai_info {
+ const bool direction[2]; /* playback & capture support */
+ const char *dai_name;
+ const int dai_type;
+ const int dailink[2]; /* dailink id for each direction */
+ const struct snd_kcontrol_new *controls;
+ const int num_controls;
+ const struct snd_soc_dapm_widget *widgets;
+ const int num_widgets;
+ int (*init)(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_links,
+ struct asoc_sdw_codec_info *info,
+ bool playback);
+ int (*exit)(struct snd_soc_card *card, struct snd_soc_dai_link *dai_link);
+ int (*rtd_init)(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+ bool rtd_init_done; /* Indicate that the rtd_init callback is done */
+ unsigned long quirk;
+};
+
+struct asoc_sdw_codec_info {
+ const int part_id;
+ const int version_id;
+ const char *codec_name;
+ int amp_num;
+ const u8 acpi_id[ACPI_ID_LEN];
+ const bool ignore_internal_dmic;
+ const struct snd_soc_ops *ops;
+ struct asoc_sdw_dai_info dais[SOC_SDW_MAX_DAI_NUM];
+ const int dai_num;
+
+ int (*codec_card_late_probe)(struct snd_soc_card *card);
+
+ int (*count_sidecar)(struct snd_soc_card *card,
+ int *num_dais, int *num_devs);
+ int (*add_sidecar)(struct snd_soc_card *card,
+ struct snd_soc_dai_link **dai_links,
+ struct snd_soc_codec_conf **codec_conf);
+};
+
+struct asoc_sdw_mc_private {
+ struct snd_soc_card card;
+ struct snd_soc_jack sdw_headset;
+ struct device *headset_codec_dev; /* only one headset per card */
+ struct device *amp_dev1, *amp_dev2;
+ bool append_dai_type;
+ bool ignore_internal_dmic;
+ void *private;
+ unsigned long mc_quirk;
+ int codec_info_list_count;
+};
+
+struct asoc_sdw_endpoint {
+ struct list_head list;
+
+ u32 link_mask;
+ const char *codec_name;
+ const char *name_prefix;
+ bool include_sidecar;
+
+ struct asoc_sdw_codec_info *codec_info;
+ const struct asoc_sdw_dai_info *dai_info;
+};
+
+struct asoc_sdw_dailink {
+ bool initialised;
+
+ u8 group_id;
+ u32 link_mask[SNDRV_PCM_STREAM_LAST + 1];
+ int num_devs[SNDRV_PCM_STREAM_LAST + 1];
+ struct list_head endpoints;
+};
+
+extern struct asoc_sdw_codec_info codec_info_list[];
+int asoc_sdw_get_codec_info_list_count(void);
+
+int asoc_sdw_startup(struct snd_pcm_substream *substream);
+int asoc_sdw_prepare(struct snd_pcm_substream *substream);
+int asoc_sdw_prepare(struct snd_pcm_substream *substream);
+int asoc_sdw_trigger(struct snd_pcm_substream *substream, int cmd);
+int asoc_sdw_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params);
+int asoc_sdw_hw_free(struct snd_pcm_substream *substream);
+void asoc_sdw_shutdown(struct snd_pcm_substream *substream);
+
+const char *asoc_sdw_get_codec_name(struct device *dev,
+ const struct asoc_sdw_codec_info *codec_info,
+ const struct snd_soc_acpi_link_adr *adr_link,
+ int adr_index);
+
+struct asoc_sdw_codec_info *asoc_sdw_find_codec_info_part(const u64 adr);
+
+struct asoc_sdw_codec_info *asoc_sdw_find_codec_info_acpi(const u8 *acpi_id);
+
+struct asoc_sdw_codec_info *asoc_sdw_find_codec_info_dai(const char *dai_name,
+ int *dai_index);
+
+struct snd_soc_dai_link *asoc_sdw_mc_find_codec_dai_used(struct snd_soc_card *card,
+ const char *dai_name);
+
+void asoc_sdw_mc_dailink_exit_loop(struct snd_soc_card *card);
+
+int asoc_sdw_card_late_probe(struct snd_soc_card *card);
+
+void asoc_sdw_init_dai_link(struct device *dev, struct snd_soc_dai_link *dai_links,
+ int *be_id, char *name, int playback, int capture,
+ struct snd_soc_dai_link_component *cpus, int cpus_num,
+ struct snd_soc_dai_link_component *platform_component,
+ int num_platforms, struct snd_soc_dai_link_component *codecs,
+ int codecs_num, int (*init)(struct snd_soc_pcm_runtime *rtd),
+ const struct snd_soc_ops *ops);
+
+int asoc_sdw_init_simple_dai_link(struct device *dev, struct snd_soc_dai_link *dai_links,
+ int *be_id, char *name, int playback, int capture,
+ const char *cpu_dai_name, const char *platform_comp_name,
+ int num_platforms, const char *codec_name,
+ const char *codec_dai_name,
+ int (*init)(struct snd_soc_pcm_runtime *rtd),
+ const struct snd_soc_ops *ops);
+
+int asoc_sdw_count_sdw_endpoints(struct snd_soc_card *card, int *num_devs, int *num_ends);
+
+struct asoc_sdw_dailink *asoc_sdw_find_dailink(struct asoc_sdw_dailink *dailinks,
+ const struct snd_soc_acpi_endpoint *new);
+
+int asoc_sdw_parse_sdw_endpoints(struct snd_soc_card *card,
+ struct asoc_sdw_dailink *soc_dais,
+ struct asoc_sdw_endpoint *soc_ends,
+ int *num_devs);
+
+int asoc_sdw_rtd_init(struct snd_soc_pcm_runtime *rtd);
+
+/* DMIC support */
+int asoc_sdw_dmic_init(struct snd_soc_pcm_runtime *rtd);
+
+/* RT711 support */
+int asoc_sdw_rt711_init(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_links,
+ struct asoc_sdw_codec_info *info,
+ bool playback);
+int asoc_sdw_rt711_exit(struct snd_soc_card *card, struct snd_soc_dai_link *dai_link);
+
+/* RT711-SDCA support */
+int asoc_sdw_rt_sdca_jack_init(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_links,
+ struct asoc_sdw_codec_info *info,
+ bool playback);
+int asoc_sdw_rt_sdca_jack_exit(struct snd_soc_card *card, struct snd_soc_dai_link *dai_link);
+
+/* RT1308 I2S support */
+extern const struct snd_soc_ops soc_sdw_rt1308_i2s_ops;
+
+/* generic amp support */
+int asoc_sdw_rt_amp_init(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_links,
+ struct asoc_sdw_codec_info *info,
+ bool playback);
+int asoc_sdw_rt_amp_exit(struct snd_soc_card *card, struct snd_soc_dai_link *dai_link);
+
+/* CS42L43 support */
+int asoc_sdw_cs42l43_spk_init(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_links,
+ struct asoc_sdw_codec_info *info,
+ bool playback);
+
+/* CS AMP support */
+int asoc_sdw_bridge_cs35l56_count_sidecar(struct snd_soc_card *card,
+ int *num_dais, int *num_devs);
+int asoc_sdw_bridge_cs35l56_add_sidecar(struct snd_soc_card *card,
+ struct snd_soc_dai_link **dai_links,
+ struct snd_soc_codec_conf **codec_conf);
+int asoc_sdw_bridge_cs35l56_spk_init(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_links,
+ struct asoc_sdw_codec_info *info,
+ bool playback);
+
+int asoc_sdw_cs_amp_init(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_links,
+ struct asoc_sdw_codec_info *info,
+ bool playback);
+
+/* MAXIM codec support */
+int asoc_sdw_maxim_init(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_links,
+ struct asoc_sdw_codec_info *info,
+ bool playback);
+
+/* dai_link init callbacks */
+int asoc_sdw_rt_dmic_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_rt_sdca_jack_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_rt_amp_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_rt700_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_rt711_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_rt712_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_rt722_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_rt5682_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_cs42l42_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_cs42l43_hs_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_cs42l43_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_cs42l43_dmic_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_cs_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_maxim_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+
+#endif
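Each supported codec is described by an entry in codec_info_list[]; an abbreviated sketch of what one entry looks like (part id, DAI name and ids are illustrative, not copied from a real table):

#include <sound/soc_sdw_utils.h>

static struct asoc_sdw_codec_info example_codec_info = {
	.part_id = 0x711,
	.dais = {
		{
			.direction = { true, true },	/* playback and capture */
			.dai_name = "example-sdca-aif1",
			.dai_type = SOC_SDW_DAI_TYPE_JACK,
			.dailink = { SOC_SDW_JACK_OUT_DAI_ID, SOC_SDW_JACK_IN_DAI_ID },
			.init = asoc_sdw_rt_sdca_jack_init,
			.exit = asoc_sdw_rt_sdca_jack_exit,
			.rtd_init = asoc_sdw_rt_sdca_jack_rtd_init,
		},
	},
	.dai_num = 1,
};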
diff --git a/include/sound/soundfont.h b/include/sound/soundfont.h
index 98ed98d89d6d..8a40cc15f66d 100644
--- a/include/sound/soundfont.h
+++ b/include/sound/soundfont.h
@@ -86,9 +86,11 @@ struct snd_sf_list {
};
/* Prototypes for soundfont.c */
-int snd_soundfont_load(struct snd_sf_list *sflist, const void __user *data,
+int snd_soundfont_load(struct snd_card *card,
+ struct snd_sf_list *sflist, const void __user *data,
long count, int client);
-int snd_soundfont_load_guspatch(struct snd_sf_list *sflist, const char __user *data,
+int snd_soundfont_load_guspatch(struct snd_card *card,
+ struct snd_sf_list *sflist, const char __user *data,
long count);
int snd_soundfont_close_check(struct snd_sf_list *sflist, int client);
diff --git a/include/sound/tas2563-tlv.h b/include/sound/tas2563-tlv.h
new file mode 100644
index 000000000000..bb269b21f460
--- /dev/null
+++ b/include/sound/tas2563-tlv.h
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+//
+// ALSA SoC Texas Instruments TAS2563 Audio Smart Amplifier
+//
+// Copyright (C) 2022 - 2024 Texas Instruments Incorporated
+// https://www.ti.com
+//
+// The TAS2563 driver implements a flexible and configurable
+// algo coefficient setting for one, two, or even multiple
+// TAS2563 chips.
+//
+// Author: Shenghao Ding <shenghao-ding@ti.com>
+//
+
+#ifndef __TAS2563_TLV_H__
+#define __TAS2563_TLV_H__
+
+static const __maybe_unused DECLARE_TLV_DB_SCALE(tas2563_dvc_tlv, -12150, 50, 1);
+
+/* pow(10, db/20) * pow(2,30) */
+static const __maybe_unused unsigned char tas2563_dvc_table[][4] = {
+ { 0X00, 0X00, 0X00, 0X00 }, /* -121.5db */
+ { 0X00, 0X00, 0X03, 0XBC }, /* -121.0db */
+ { 0X00, 0X00, 0X03, 0XF5 }, /* -120.5db */
+ { 0X00, 0X00, 0X04, 0X31 }, /* -120.0db */
+ { 0X00, 0X00, 0X04, 0X71 }, /* -119.5db */
+ { 0X00, 0X00, 0X04, 0XB4 }, /* -119.0db */
+ { 0X00, 0X00, 0X04, 0XFC }, /* -118.5db */
+ { 0X00, 0X00, 0X05, 0X47 }, /* -118.0db */
+ { 0X00, 0X00, 0X05, 0X97 }, /* -117.5db */
+ { 0X00, 0X00, 0X05, 0XEC }, /* -117.0db */
+ { 0X00, 0X00, 0X06, 0X46 }, /* -116.5db */
+ { 0X00, 0X00, 0X06, 0XA5 }, /* -116.0db */
+ { 0X00, 0X00, 0X07, 0X0A }, /* -115.5db */
+ { 0X00, 0X00, 0X07, 0X75 }, /* -115.0db */
+ { 0X00, 0X00, 0X07, 0XE6 }, /* -114.5db */
+ { 0X00, 0X00, 0X08, 0X5E }, /* -114.0db */
+ { 0X00, 0X00, 0X08, 0XDD }, /* -113.5db */
+ { 0X00, 0X00, 0X09, 0X63 }, /* -113.0db */
+ { 0X00, 0X00, 0X09, 0XF2 }, /* -112.5db */
+ { 0X00, 0X00, 0X0A, 0X89 }, /* -112.0db */
+ { 0X00, 0X00, 0X0B, 0X28 }, /* -111.5db */
+ { 0X00, 0X00, 0X0B, 0XD2 }, /* -111.0db */
+ { 0X00, 0X00, 0X0C, 0X85 }, /* -110.5db */
+ { 0X00, 0X00, 0X0D, 0X43 }, /* -110.0db */
+ { 0X00, 0X00, 0X0E, 0X0C }, /* -109.5db */
+ { 0X00, 0X00, 0X0E, 0XE1 }, /* -109.0db */
+ { 0X00, 0X00, 0X0F, 0XC3 }, /* -108.5db */
+ { 0X00, 0X00, 0X10, 0XB2 }, /* -108.0db */
+ { 0X00, 0X00, 0X11, 0XAF }, /* -107.5db */
+ { 0X00, 0X00, 0X12, 0XBC }, /* -107.0db */
+ { 0X00, 0X00, 0X13, 0XD8 }, /* -106.5db */
+ { 0X00, 0X00, 0X15, 0X05 }, /* -106.0db */
+ { 0X00, 0X00, 0X16, 0X44 }, /* -105.5db */
+ { 0X00, 0X00, 0X17, 0X96 }, /* -105.0db */
+ { 0X00, 0X00, 0X18, 0XFB }, /* -104.5db */
+ { 0X00, 0X00, 0X1A, 0X76 }, /* -104.0db */
+ { 0X00, 0X00, 0X1C, 0X08 }, /* -103.5db */
+ { 0X00, 0X00, 0X1D, 0XB1 }, /* -103.0db */
+ { 0X00, 0X00, 0X1F, 0X73 }, /* -102.5db */
+ { 0X00, 0X00, 0X21, 0X51 }, /* -102.0db */
+ { 0X00, 0X00, 0X23, 0X4A }, /* -101.5db */
+ { 0X00, 0X00, 0X25, 0X61 }, /* -101.0db */
+ { 0X00, 0X00, 0X27, 0X98 }, /* -100.5db */
+ { 0X00, 0X00, 0X29, 0XF1 }, /* -100.0db */
+ { 0X00, 0X00, 0X2C, 0X6D }, /* -99.5db */
+ { 0X00, 0X00, 0X2F, 0X0F }, /* -99.0db */
+ { 0X00, 0X00, 0X31, 0XD9 }, /* -98.5db */
+ { 0X00, 0X00, 0X34, 0XCD }, /* -98.0db */
+ { 0X00, 0X00, 0X37, 0XEE }, /* -97.5db */
+ { 0X00, 0X00, 0X3B, 0X3F }, /* -97.0db */
+ { 0X00, 0X00, 0X3E, 0XC1 }, /* -96.5db */
+ { 0X00, 0X00, 0X42, 0X79 }, /* -96.0db */
+ { 0X00, 0X00, 0X46, 0X6A }, /* -95.5db */
+ { 0X00, 0X00, 0X4A, 0X96 }, /* -95.0db */
+ { 0X00, 0X00, 0X4F, 0X01 }, /* -94.5db */
+ { 0X00, 0X00, 0X53, 0XAF }, /* -94.0db */
+ { 0X00, 0X00, 0X58, 0XA5 }, /* -93.5db */
+ { 0X00, 0X00, 0X5D, 0XE6 }, /* -93.0db */
+ { 0X00, 0X00, 0X63, 0X76 }, /* -92.5db */
+ { 0X00, 0X00, 0X69, 0X5B }, /* -92.0db */
+ { 0X00, 0X00, 0X6F, 0X99 }, /* -91.5db */
+ { 0X00, 0X00, 0X76, 0X36 }, /* -91.0db */
+ { 0X00, 0X00, 0X7D, 0X37 }, /* -90.5db */
+ { 0X00, 0X00, 0X84, 0XA2 }, /* -90.0db */
+ { 0X00, 0X00, 0X8C, 0X7E }, /* -89.5db */
+ { 0X00, 0X00, 0X94, 0XD1 }, /* -89.0db */
+ { 0X00, 0X00, 0X9D, 0XA3 }, /* -88.5db */
+ { 0X00, 0X00, 0XA6, 0XFA }, /* -88.0db */
+ { 0X00, 0X00, 0XB0, 0XDF }, /* -87.5db */
+ { 0X00, 0X00, 0XBB, 0X5A }, /* -87.0db */
+ { 0X00, 0X00, 0XC6, 0X74 }, /* -86.5db */
+ { 0X00, 0X00, 0XD2, 0X36 }, /* -86.0db */
+ { 0X00, 0X00, 0XDE, 0XAB }, /* -85.5db */
+ { 0X00, 0X00, 0XEB, 0XDC }, /* -85.0db */
+ { 0X00, 0X00, 0XF9, 0XD6 }, /* -84.5db */
+ { 0X00, 0X01, 0X08, 0XA4 }, /* -84.0db */
+ { 0X00, 0X01, 0X18, 0X52 }, /* -83.5db */
+ { 0X00, 0X01, 0X28, 0XEF }, /* -83.0db */
+ { 0X00, 0X01, 0X3A, 0X87 }, /* -82.5db */
+ { 0X00, 0X01, 0X4D, 0X2A }, /* -82.0db */
+ { 0X00, 0X01, 0X60, 0XE8 }, /* -81.5db */
+ { 0X00, 0X01, 0X75, 0XD1 }, /* -81.0db */
+ { 0X00, 0X01, 0X8B, 0XF7 }, /* -80.5db */
+ { 0X00, 0X01, 0XA3, 0X6E }, /* -80.0db */
+ { 0X00, 0X01, 0XBC, 0X48 }, /* -79.5db */
+ { 0X00, 0X01, 0XD6, 0X9B }, /* -79.0db */
+ { 0X00, 0X01, 0XF2, 0X7E }, /* -78.5db */
+ { 0X00, 0X02, 0X10, 0X08 }, /* -78.0db */
+ { 0X00, 0X02, 0X2F, 0X51 }, /* -77.5db */
+ { 0X00, 0X02, 0X50, 0X76 }, /* -77.0db */
+ { 0X00, 0X02, 0X73, 0X91 }, /* -76.5db */
+ { 0X00, 0X02, 0X98, 0XC0 }, /* -76.0db */
+ { 0X00, 0X02, 0XC0, 0X24 }, /* -75.5db */
+ { 0X00, 0X02, 0XE9, 0XDD }, /* -75.0db */
+ { 0X00, 0X03, 0X16, 0X0F }, /* -74.5db */
+ { 0X00, 0X03, 0X44, 0XDF }, /* -74.0db */
+ { 0X00, 0X03, 0X76, 0X76 }, /* -73.5db */
+ { 0X00, 0X03, 0XAA, 0XFC }, /* -73.0db */
+ { 0X00, 0X03, 0XE2, 0XA0 }, /* -72.5db */
+ { 0X00, 0X04, 0X1D, 0X8F }, /* -72.0db */
+ { 0X00, 0X04, 0X5B, 0XFD }, /* -71.5db */
+ { 0X00, 0X04, 0X9E, 0X1D }, /* -71.0db */
+ { 0X00, 0X04, 0XE4, 0X29 }, /* -70.5db */
+ { 0X00, 0X05, 0X2E, 0X5A }, /* -70.0db */
+ { 0X00, 0X05, 0X7C, 0XF2 }, /* -69.5db */
+ { 0X00, 0X05, 0XD0, 0X31 }, /* -69.0db */
+ { 0X00, 0X06, 0X28, 0X60 }, /* -68.5db */
+ { 0X00, 0X06, 0X85, 0XC8 }, /* -68.0db */
+ { 0X00, 0X06, 0XE8, 0XB9 }, /* -67.5db */
+ { 0X00, 0X07, 0X51, 0X86 }, /* -67.0db */
+ { 0X00, 0X07, 0XC0, 0X8A }, /* -66.5db */
+ { 0X00, 0X08, 0X36, 0X21 }, /* -66.0db */
+ { 0X00, 0X08, 0XB2, 0XB0 }, /* -65.5db */
+ { 0X00, 0X09, 0X36, 0XA1 }, /* -65.0db */
+ { 0X00, 0X09, 0XC2, 0X63 }, /* -64.5db */
+ { 0X00, 0X0A, 0X56, 0X6D }, /* -64.0db */
+ { 0X00, 0X0A, 0XF3, 0X3C }, /* -63.5db */
+ { 0X00, 0X0B, 0X99, 0X56 }, /* -63.0db */
+ { 0X00, 0X0C, 0X49, 0X48 }, /* -62.5db */
+ { 0X00, 0X0D, 0X03, 0XA7 }, /* -62.0db */
+ { 0X00, 0X0D, 0XC9, 0X11 }, /* -61.5db */
+ { 0X00, 0X0E, 0X9A, 0X2D }, /* -61.0db */
+ { 0X00, 0X0F, 0X77, 0XAD }, /* -60.5db */
+ { 0X00, 0X10, 0X62, 0X4D }, /* -60.0db */
+ { 0X00, 0X11, 0X5A, 0XD5 }, /* -59.5db */
+ { 0X00, 0X12, 0X62, 0X16 }, /* -59.0db */
+ { 0X00, 0X13, 0X78, 0XF0 }, /* -58.5db */
+ { 0X00, 0X14, 0XA0, 0X50 }, /* -58.0db */
+ { 0X00, 0X15, 0XD9, 0X31 }, /* -57.5db */
+ { 0X00, 0X17, 0X24, 0X9C }, /* -57.0db */
+ { 0X00, 0X18, 0X83, 0XAA }, /* -56.5db */
+ { 0X00, 0X19, 0XF7, 0X86 }, /* -56.0db */
+ { 0X00, 0X1B, 0X81, 0X6A }, /* -55.5db */
+ { 0X00, 0X1D, 0X22, 0XA4 }, /* -55.0db */
+ { 0X00, 0X1E, 0XDC, 0X98 }, /* -54.5db */
+ { 0X00, 0X20, 0XB0, 0XBC }, /* -54.0db */
+ { 0X00, 0X22, 0XA0, 0X9D }, /* -53.5db */
+ { 0X00, 0X24, 0XAD, 0XE0 }, /* -53.0db */
+ { 0X00, 0X26, 0XDA, 0X43 }, /* -52.5db */
+ { 0X00, 0X29, 0X27, 0X9D }, /* -52.0db */
+ { 0X00, 0X2B, 0X97, 0XE3 }, /* -51.5db */
+ { 0X00, 0X2E, 0X2D, 0X27 }, /* -51.0db */
+ { 0X00, 0X30, 0XE9, 0X9A }, /* -50.5db */
+ { 0X00, 0X33, 0XCF, 0X8D }, /* -50.0db */
+ { 0X00, 0X36, 0XE1, 0X78 }, /* -49.5db */
+ { 0X00, 0X3A, 0X21, 0XF3 }, /* -49.0db */
+ { 0X00, 0X3D, 0X93, 0XC3 }, /* -48.5db */
+ { 0X00, 0X41, 0X39, 0XD3 }, /* -48.0db */
+ { 0X00, 0X45, 0X17, 0X3B }, /* -47.5db */
+ { 0X00, 0X49, 0X2F, 0X44 }, /* -47.0db */
+ { 0X00, 0X4D, 0X85, 0X66 }, /* -46.5db */
+ { 0X00, 0X52, 0X1D, 0X50 }, /* -46.0db */
+ { 0X00, 0X56, 0XFA, 0XE8 }, /* -45.5db */
+ { 0X00, 0X5C, 0X22, 0X4E }, /* -45.0db */
+ { 0X00, 0X61, 0X97, 0XE1 }, /* -44.5db */
+ { 0X00, 0X67, 0X60, 0X44 }, /* -44.0db */
+ { 0X00, 0X6D, 0X80, 0X60 }, /* -43.5db */
+ { 0X00, 0X73, 0XFD, 0X65 }, /* -43.0db */
+ { 0X00, 0X7A, 0XDC, 0XD7 }, /* -42.5db */
+ { 0X00, 0X82, 0X24, 0X8A }, /* -42.0db */
+ { 0X00, 0X89, 0XDA, 0XAB }, /* -41.5db */
+ { 0X00, 0X92, 0X05, 0XC6 }, /* -41.0db */
+ { 0X00, 0X9A, 0XAC, 0XC8 }, /* -40.5db */
+ { 0X00, 0XA3, 0XD7, 0X0A }, /* -40.0db */
+ { 0X00, 0XAD, 0X8C, 0X52 }, /* -39.5db */
+ { 0X00, 0XB7, 0XD4, 0XDD }, /* -39.0db */
+ { 0X00, 0XC2, 0XB9, 0X65 }, /* -38.5db */
+ { 0X00, 0XCE, 0X43, 0X28 }, /* -38.0db */
+ { 0X00, 0XDA, 0X7B, 0XF1 }, /* -37.5db */
+ { 0X00, 0XE7, 0X6E, 0X1E }, /* -37.0db */
+ { 0X00, 0XF5, 0X24, 0XAC }, /* -36.5db */
+ { 0X01, 0X03, 0XAB, 0X3D }, /* -36.0db */
+ { 0X01, 0X13, 0X0E, 0X24 }, /* -35.5db */
+ { 0X01, 0X23, 0X5A, 0X71 }, /* -35.0db */
+ { 0X01, 0X34, 0X9D, 0XF8 }, /* -34.5db */
+ { 0X01, 0X46, 0XE7, 0X5D }, /* -34.0db */
+ { 0X01, 0X5A, 0X46, 0X27 }, /* -33.5db */
+ { 0X01, 0X6E, 0XCA, 0XC5 }, /* -33.0db */
+ { 0X01, 0X84, 0X86, 0X9F }, /* -32.5db */
+ { 0X01, 0X9B, 0X8C, 0X27 }, /* -32.0db */
+ { 0X01, 0XB3, 0XEE, 0XE5 }, /* -31.5db */
+ { 0X01, 0XCD, 0XC3, 0X8C }, /* -31.0db */
+ { 0X01, 0XE9, 0X20, 0X05 }, /* -30.5db */
+ { 0X02, 0X06, 0X1B, 0X89 }, /* -30.0db */
+ { 0X02, 0X24, 0XCE, 0XB0 }, /* -29.5db */
+ { 0X02, 0X45, 0X53, 0X85 }, /* -29.0db */
+ { 0X02, 0X67, 0XC5, 0XA2 }, /* -28.5db */
+ { 0X02, 0X8C, 0X42, 0X3F }, /* -28.0db */
+ { 0X02, 0XB2, 0XE8, 0X55 }, /* -27.5db */
+ { 0X02, 0XDB, 0XD8, 0XAD }, /* -27.0db */
+ { 0X03, 0X07, 0X36, 0X05 }, /* -26.5db */
+ { 0X03, 0X35, 0X25, 0X29 }, /* -26.0db */
+ { 0X03, 0X65, 0XCD, 0X13 }, /* -25.5db */
+ { 0X03, 0X99, 0X57, 0X0C }, /* -25.0db */
+ { 0X03, 0XCF, 0XEE, 0XCF }, /* -24.5db */
+ { 0X04, 0X09, 0XC2, 0XB0 }, /* -24.0db */
+ { 0X04, 0X47, 0X03, 0XC1 }, /* -23.5db */
+ { 0X04, 0X87, 0XE5, 0XFB }, /* -23.0db */
+ { 0X04, 0XCC, 0XA0, 0X6D }, /* -22.5db */
+ { 0X05, 0X15, 0X6D, 0X68 }, /* -22.0db */
+ { 0X05, 0X62, 0X8A, 0XB3 }, /* -21.5db */
+ { 0X05, 0XB4, 0X39, 0XBC }, /* -21.0db */
+ { 0X06, 0X0A, 0XBF, 0XD4 }, /* -20.5db */
+ { 0X06, 0X66, 0X66, 0X66 }, /* -20.0db */
+ { 0X06, 0XC7, 0X7B, 0X36 }, /* -19.5db */
+ { 0X07, 0X2E, 0X50, 0XA6 }, /* -19.0db */
+ { 0X07, 0X9B, 0X3D, 0XF6 }, /* -18.5db */
+ { 0X08, 0X0E, 0X9F, 0X96 }, /* -18.0db */
+ { 0X08, 0X88, 0XD7, 0X6D }, /* -17.5db */
+ { 0X09, 0X0A, 0X4D, 0X2F }, /* -17.0db */
+ { 0X09, 0X93, 0X6E, 0XB8 }, /* -16.5db */
+ { 0X0A, 0X24, 0XB0, 0X62 }, /* -16.0db */
+ { 0X0A, 0XBE, 0X8D, 0X70 }, /* -15.5db */
+ { 0X0B, 0X61, 0X88, 0X71 }, /* -15.0db */
+ { 0X0C, 0X0E, 0X2B, 0XB0 }, /* -14.5db */
+ { 0X0C, 0XC5, 0X09, 0XAB }, /* -14.0db */
+ { 0X0D, 0X86, 0XBD, 0X8D }, /* -13.5db */
+ { 0X0E, 0X53, 0XEB, 0XB3 }, /* -13.0db */
+ { 0X0F, 0X2D, 0X42, 0X38 }, /* -12.5db */
+ { 0X10, 0X13, 0X79, 0X87 }, /* -12.0db */
+ { 0X11, 0X07, 0X54, 0XF9 }, /* -11.5db */
+ { 0X12, 0X09, 0XA3, 0X7A }, /* -11.0db */
+ { 0X13, 0X1B, 0X40, 0X39 }, /* -10.5db */
+ { 0X14, 0X3D, 0X13, 0X62 }, /* -10.0db */
+ { 0X15, 0X70, 0X12, 0XE1 }, /* -9.5db */
+ { 0X16, 0XB5, 0X43, 0X37 }, /* -9.0db */
+ { 0X18, 0X0D, 0XB8, 0X54 }, /* -8.5db */
+ { 0X19, 0X7A, 0X96, 0X7F }, /* -8.0db */
+ { 0X1A, 0XFD, 0X13, 0X54 }, /* -7.5db */
+ { 0X1C, 0X96, 0X76, 0XC6 }, /* -7.0db */
+ { 0X1E, 0X48, 0X1C, 0X37 }, /* -6.5db */
+ { 0X20, 0X13, 0X73, 0X9E }, /* -6.0db */
+ { 0X21, 0XFA, 0X02, 0XBF }, /* -5.5db */
+ { 0X23, 0XFD, 0X66, 0X78 }, /* -5.0db */
+ { 0X26, 0X1F, 0X54, 0X1C }, /* -4.5db */
+ { 0X28, 0X61, 0X9A, 0XE9 }, /* -4.0db */
+ { 0X2A, 0XC6, 0X25, 0X91 }, /* -3.5db */
+ { 0X2D, 0X4E, 0XFB, 0XD5 }, /* -3.0db */
+ { 0X2F, 0XFE, 0X44, 0X48 }, /* -2.5db */
+ { 0X32, 0XD6, 0X46, 0X17 }, /* -2.0db */
+ { 0X35, 0XD9, 0X6B, 0X02 }, /* -1.5db */
+ { 0X39, 0X0A, 0X41, 0X5F }, /* -1.0db */
+ { 0X3C, 0X6B, 0X7E, 0X4F }, /* -0.5db */
+ { 0X40, 0X00, 0X00, 0X00 }, /* 0.0db */
+ { 0X43, 0XCA, 0XD0, 0X22 }, /* 0.5db */
+ { 0X47, 0XCF, 0X26, 0X7D }, /* 1.0db */
+ { 0X4C, 0X10, 0X6B, 0XA5 }, /* 1.5db */
+ { 0X50, 0X92, 0X3B, 0XE3 }, /* 2.0db */
+ { 0X55, 0X58, 0X6A, 0X46 }, /* 2.5db */
+ { 0X5A, 0X67, 0X03, 0XDF }, /* 3.0db */
+ { 0X5F, 0XC2, 0X53, 0X32 }, /* 3.5db */
+ { 0X65, 0X6E, 0XE3, 0XDB }, /* 4.0db */
+ { 0X6B, 0X71, 0X86, 0X68 }, /* 4.5db */
+ { 0X71, 0XCF, 0X54, 0X71 }, /* 5.0db */
+ { 0X78, 0X8D, 0XB4, 0XE9 }, /* 5.5db */
+ { 0X7F, 0XFF, 0XFF, 0XFF }, /* 6.0db */
+};
+#endif
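The table entries follow directly from the pow(10, db/20) * pow(2,30) formula: for the -20.0 dB row, 10^(-20/20) * 2^30 = 0.1 * 1073741824 ≈ 107374182 = 0x06666666, stored big-endian as { 0X06, 0X66, 0X66, 0X66 }; the 0.0 dB row is exactly 2^30 = 0x40000000, and the final +6.0 dB entry is pinned to full scale (0x7FFFFFFF).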
diff --git a/include/sound/tas2781-tlv.h b/include/sound/tas2781-tlv.h
index 00fd4d449ff3..d87263e43fdb 100644
--- a/include/sound/tas2781-tlv.h
+++ b/include/sound/tas2781-tlv.h
@@ -17,265 +17,5 @@
static const __maybe_unused DECLARE_TLV_DB_SCALE(dvc_tlv, -10000, 100, 0);
static const __maybe_unused DECLARE_TLV_DB_SCALE(amp_vol_tlv, 1100, 50, 0);
-static const __maybe_unused DECLARE_TLV_DB_SCALE(tas2563_dvc_tlv, -12150, 50, 1);
-/* pow(10, db/20) * pow(2,30) */
-static const __maybe_unused unsigned char tas2563_dvc_table[][4] = {
- { 0X00, 0X00, 0X00, 0X00 }, /* -121.5db */
- { 0X00, 0X00, 0X03, 0XBC }, /* -121.0db */
- { 0X00, 0X00, 0X03, 0XF5 }, /* -120.5db */
- { 0X00, 0X00, 0X04, 0X31 }, /* -120.0db */
- { 0X00, 0X00, 0X04, 0X71 }, /* -119.5db */
- { 0X00, 0X00, 0X04, 0XB4 }, /* -119.0db */
- { 0X00, 0X00, 0X04, 0XFC }, /* -118.5db */
- { 0X00, 0X00, 0X05, 0X47 }, /* -118.0db */
- { 0X00, 0X00, 0X05, 0X97 }, /* -117.5db */
- { 0X00, 0X00, 0X05, 0XEC }, /* -117.0db */
- { 0X00, 0X00, 0X06, 0X46 }, /* -116.5db */
- { 0X00, 0X00, 0X06, 0XA5 }, /* -116.0db */
- { 0X00, 0X00, 0X07, 0X0A }, /* -115.5db */
- { 0X00, 0X00, 0X07, 0X75 }, /* -115.0db */
- { 0X00, 0X00, 0X07, 0XE6 }, /* -114.5db */
- { 0X00, 0X00, 0X08, 0X5E }, /* -114.0db */
- { 0X00, 0X00, 0X08, 0XDD }, /* -113.5db */
- { 0X00, 0X00, 0X09, 0X63 }, /* -113.0db */
- { 0X00, 0X00, 0X09, 0XF2 }, /* -112.5db */
- { 0X00, 0X00, 0X0A, 0X89 }, /* -112.0db */
- { 0X00, 0X00, 0X0B, 0X28 }, /* -111.5db */
- { 0X00, 0X00, 0X0B, 0XD2 }, /* -111.0db */
- { 0X00, 0X00, 0X0C, 0X85 }, /* -110.5db */
- { 0X00, 0X00, 0X0D, 0X43 }, /* -110.0db */
- { 0X00, 0X00, 0X0E, 0X0C }, /* -109.5db */
- { 0X00, 0X00, 0X0E, 0XE1 }, /* -109.0db */
- { 0X00, 0X00, 0X0F, 0XC3 }, /* -108.5db */
- { 0X00, 0X00, 0X10, 0XB2 }, /* -108.0db */
- { 0X00, 0X00, 0X11, 0XAF }, /* -107.5db */
- { 0X00, 0X00, 0X12, 0XBC }, /* -107.0db */
- { 0X00, 0X00, 0X13, 0XD8 }, /* -106.5db */
- { 0X00, 0X00, 0X15, 0X05 }, /* -106.0db */
- { 0X00, 0X00, 0X16, 0X44 }, /* -105.5db */
- { 0X00, 0X00, 0X17, 0X96 }, /* -105.0db */
- { 0X00, 0X00, 0X18, 0XFB }, /* -104.5db */
- { 0X00, 0X00, 0X1A, 0X76 }, /* -104.0db */
- { 0X00, 0X00, 0X1C, 0X08 }, /* -103.5db */
- { 0X00, 0X00, 0X1D, 0XB1 }, /* -103.0db */
- { 0X00, 0X00, 0X1F, 0X73 }, /* -102.5db */
- { 0X00, 0X00, 0X21, 0X51 }, /* -102.0db */
- { 0X00, 0X00, 0X23, 0X4A }, /* -101.5db */
- { 0X00, 0X00, 0X25, 0X61 }, /* -101.0db */
- { 0X00, 0X00, 0X27, 0X98 }, /* -100.5db */
- { 0X00, 0X00, 0X29, 0XF1 }, /* -100.0db */
- { 0X00, 0X00, 0X2C, 0X6D }, /* -99.5db */
- { 0X00, 0X00, 0X2F, 0X0F }, /* -99.0db */
- { 0X00, 0X00, 0X31, 0XD9 }, /* -98.5db */
- { 0X00, 0X00, 0X34, 0XCD }, /* -98.0db */
- { 0X00, 0X00, 0X37, 0XEE }, /* -97.5db */
- { 0X00, 0X00, 0X3B, 0X3F }, /* -97.0db */
- { 0X00, 0X00, 0X3E, 0XC1 }, /* -96.5db */
- { 0X00, 0X00, 0X42, 0X79 }, /* -96.0db */
- { 0X00, 0X00, 0X46, 0X6A }, /* -95.5db */
- { 0X00, 0X00, 0X4A, 0X96 }, /* -95.0db */
- { 0X00, 0X00, 0X4F, 0X01 }, /* -94.5db */
- { 0X00, 0X00, 0X53, 0XAF }, /* -94.0db */
- { 0X00, 0X00, 0X58, 0XA5 }, /* -93.5db */
- { 0X00, 0X00, 0X5D, 0XE6 }, /* -93.0db */
- { 0X00, 0X00, 0X63, 0X76 }, /* -92.5db */
- { 0X00, 0X00, 0X69, 0X5B }, /* -92.0db */
- { 0X00, 0X00, 0X6F, 0X99 }, /* -91.5db */
- { 0X00, 0X00, 0X76, 0X36 }, /* -91.0db */
- { 0X00, 0X00, 0X7D, 0X37 }, /* -90.5db */
- { 0X00, 0X00, 0X84, 0XA2 }, /* -90.0db */
- { 0X00, 0X00, 0X8C, 0X7E }, /* -89.5db */
- { 0X00, 0X00, 0X94, 0XD1 }, /* -89.0db */
- { 0X00, 0X00, 0X9D, 0XA3 }, /* -88.5db */
- { 0X00, 0X00, 0XA6, 0XFA }, /* -88.0db */
- { 0X00, 0X00, 0XB0, 0XDF }, /* -87.5db */
- { 0X00, 0X00, 0XBB, 0X5A }, /* -87.0db */
- { 0X00, 0X00, 0XC6, 0X74 }, /* -86.5db */
- { 0X00, 0X00, 0XD2, 0X36 }, /* -86.0db */
- { 0X00, 0X00, 0XDE, 0XAB }, /* -85.5db */
- { 0X00, 0X00, 0XEB, 0XDC }, /* -85.0db */
- { 0X00, 0X00, 0XF9, 0XD6 }, /* -84.5db */
- { 0X00, 0X01, 0X08, 0XA4 }, /* -84.0db */
- { 0X00, 0X01, 0X18, 0X52 }, /* -83.5db */
- { 0X00, 0X01, 0X28, 0XEF }, /* -83.0db */
- { 0X00, 0X01, 0X3A, 0X87 }, /* -82.5db */
- { 0X00, 0X01, 0X4D, 0X2A }, /* -82.0db */
- { 0X00, 0X01, 0X60, 0XE8 }, /* -81.5db */
- { 0X00, 0X01, 0X75, 0XD1 }, /* -81.0db */
- { 0X00, 0X01, 0X8B, 0XF7 }, /* -80.5db */
- { 0X00, 0X01, 0XA3, 0X6E }, /* -80.0db */
- { 0X00, 0X01, 0XBC, 0X48 }, /* -79.5db */
- { 0X00, 0X01, 0XD6, 0X9B }, /* -79.0db */
- { 0X00, 0X01, 0XF2, 0X7E }, /* -78.5db */
- { 0X00, 0X02, 0X10, 0X08 }, /* -78.0db */
- { 0X00, 0X02, 0X2F, 0X51 }, /* -77.5db */
- { 0X00, 0X02, 0X50, 0X76 }, /* -77.0db */
- { 0X00, 0X02, 0X73, 0X91 }, /* -76.5db */
- { 0X00, 0X02, 0X98, 0XC0 }, /* -76.0db */
- { 0X00, 0X02, 0XC0, 0X24 }, /* -75.5db */
- { 0X00, 0X02, 0XE9, 0XDD }, /* -75.0db */
- { 0X00, 0X03, 0X16, 0X0F }, /* -74.5db */
- { 0X00, 0X03, 0X44, 0XDF }, /* -74.0db */
- { 0X00, 0X03, 0X76, 0X76 }, /* -73.5db */
- { 0X00, 0X03, 0XAA, 0XFC }, /* -73.0db */
- { 0X00, 0X03, 0XE2, 0XA0 }, /* -72.5db */
- { 0X00, 0X04, 0X1D, 0X8F }, /* -72.0db */
- { 0X00, 0X04, 0X5B, 0XFD }, /* -71.5db */
- { 0X00, 0X04, 0X9E, 0X1D }, /* -71.0db */
- { 0X00, 0X04, 0XE4, 0X29 }, /* -70.5db */
- { 0X00, 0X05, 0X2E, 0X5A }, /* -70.0db */
- { 0X00, 0X05, 0X7C, 0XF2 }, /* -69.5db */
- { 0X00, 0X05, 0XD0, 0X31 }, /* -69.0db */
- { 0X00, 0X06, 0X28, 0X60 }, /* -68.5db */
- { 0X00, 0X06, 0X85, 0XC8 }, /* -68.0db */
- { 0X00, 0X06, 0XE8, 0XB9 }, /* -67.5db */
- { 0X00, 0X07, 0X51, 0X86 }, /* -67.0db */
- { 0X00, 0X07, 0XC0, 0X8A }, /* -66.5db */
- { 0X00, 0X08, 0X36, 0X21 }, /* -66.0db */
- { 0X00, 0X08, 0XB2, 0XB0 }, /* -65.5db */
- { 0X00, 0X09, 0X36, 0XA1 }, /* -65.0db */
- { 0X00, 0X09, 0XC2, 0X63 }, /* -64.5db */
- { 0X00, 0X0A, 0X56, 0X6D }, /* -64.0db */
- { 0X00, 0X0A, 0XF3, 0X3C }, /* -63.5db */
- { 0X00, 0X0B, 0X99, 0X56 }, /* -63.0db */
- { 0X00, 0X0C, 0X49, 0X48 }, /* -62.5db */
- { 0X00, 0X0D, 0X03, 0XA7 }, /* -62.0db */
- { 0X00, 0X0D, 0XC9, 0X11 }, /* -61.5db */
- { 0X00, 0X0E, 0X9A, 0X2D }, /* -61.0db */
- { 0X00, 0X0F, 0X77, 0XAD }, /* -60.5db */
- { 0X00, 0X10, 0X62, 0X4D }, /* -60.0db */
- { 0X00, 0X11, 0X5A, 0XD5 }, /* -59.5db */
- { 0X00, 0X12, 0X62, 0X16 }, /* -59.0db */
- { 0X00, 0X13, 0X78, 0XF0 }, /* -58.5db */
- { 0X00, 0X14, 0XA0, 0X50 }, /* -58.0db */
- { 0X00, 0X15, 0XD9, 0X31 }, /* -57.5db */
- { 0X00, 0X17, 0X24, 0X9C }, /* -57.0db */
- { 0X00, 0X18, 0X83, 0XAA }, /* -56.5db */
- { 0X00, 0X19, 0XF7, 0X86 }, /* -56.0db */
- { 0X00, 0X1B, 0X81, 0X6A }, /* -55.5db */
- { 0X00, 0X1D, 0X22, 0XA4 }, /* -55.0db */
- { 0X00, 0X1E, 0XDC, 0X98 }, /* -54.5db */
- { 0X00, 0X20, 0XB0, 0XBC }, /* -54.0db */
- { 0X00, 0X22, 0XA0, 0X9D }, /* -53.5db */
- { 0X00, 0X24, 0XAD, 0XE0 }, /* -53.0db */
- { 0X00, 0X26, 0XDA, 0X43 }, /* -52.5db */
- { 0X00, 0X29, 0X27, 0X9D }, /* -52.0db */
- { 0X00, 0X2B, 0X97, 0XE3 }, /* -51.5db */
- { 0X00, 0X2E, 0X2D, 0X27 }, /* -51.0db */
- { 0X00, 0X30, 0XE9, 0X9A }, /* -50.5db */
- { 0X00, 0X33, 0XCF, 0X8D }, /* -50.0db */
- { 0X00, 0X36, 0XE1, 0X78 }, /* -49.5db */
- { 0X00, 0X3A, 0X21, 0XF3 }, /* -49.0db */
- { 0X00, 0X3D, 0X93, 0XC3 }, /* -48.5db */
- { 0X00, 0X41, 0X39, 0XD3 }, /* -48.0db */
- { 0X00, 0X45, 0X17, 0X3B }, /* -47.5db */
- { 0X00, 0X49, 0X2F, 0X44 }, /* -47.0db */
- { 0X00, 0X4D, 0X85, 0X66 }, /* -46.5db */
- { 0X00, 0X52, 0X1D, 0X50 }, /* -46.0db */
- { 0X00, 0X56, 0XFA, 0XE8 }, /* -45.5db */
- { 0X00, 0X5C, 0X22, 0X4E }, /* -45.0db */
- { 0X00, 0X61, 0X97, 0XE1 }, /* -44.5db */
- { 0X00, 0X67, 0X60, 0X44 }, /* -44.0db */
- { 0X00, 0X6D, 0X80, 0X60 }, /* -43.5db */
- { 0X00, 0X73, 0XFD, 0X65 }, /* -43.0db */
- { 0X00, 0X7A, 0XDC, 0XD7 }, /* -42.5db */
- { 0X00, 0X82, 0X24, 0X8A }, /* -42.0db */
- { 0X00, 0X89, 0XDA, 0XAB }, /* -41.5db */
- { 0X00, 0X92, 0X05, 0XC6 }, /* -41.0db */
- { 0X00, 0X9A, 0XAC, 0XC8 }, /* -40.5db */
- { 0X00, 0XA3, 0XD7, 0X0A }, /* -40.0db */
- { 0X00, 0XAD, 0X8C, 0X52 }, /* -39.5db */
- { 0X00, 0XB7, 0XD4, 0XDD }, /* -39.0db */
- { 0X00, 0XC2, 0XB9, 0X65 }, /* -38.5db */
- { 0X00, 0XCE, 0X43, 0X28 }, /* -38.0db */
- { 0X00, 0XDA, 0X7B, 0XF1 }, /* -37.5db */
- { 0X00, 0XE7, 0X6E, 0X1E }, /* -37.0db */
- { 0X00, 0XF5, 0X24, 0XAC }, /* -36.5db */
- { 0X01, 0X03, 0XAB, 0X3D }, /* -36.0db */
- { 0X01, 0X13, 0X0E, 0X24 }, /* -35.5db */
- { 0X01, 0X23, 0X5A, 0X71 }, /* -35.0db */
- { 0X01, 0X34, 0X9D, 0XF8 }, /* -34.5db */
- { 0X01, 0X46, 0XE7, 0X5D }, /* -34.0db */
- { 0X01, 0X5A, 0X46, 0X27 }, /* -33.5db */
- { 0X01, 0X6E, 0XCA, 0XC5 }, /* -33.0db */
- { 0X01, 0X84, 0X86, 0X9F }, /* -32.5db */
- { 0X01, 0X9B, 0X8C, 0X27 }, /* -32.0db */
- { 0X01, 0XB3, 0XEE, 0XE5 }, /* -31.5db */
- { 0X01, 0XCD, 0XC3, 0X8C }, /* -31.0db */
- { 0X01, 0XE9, 0X20, 0X05 }, /* -30.5db */
- { 0X02, 0X06, 0X1B, 0X89 }, /* -30.0db */
- { 0X02, 0X24, 0XCE, 0XB0 }, /* -29.5db */
- { 0X02, 0X45, 0X53, 0X85 }, /* -29.0db */
- { 0X02, 0X67, 0XC5, 0XA2 }, /* -28.5db */
- { 0X02, 0X8C, 0X42, 0X3F }, /* -28.0db */
- { 0X02, 0XB2, 0XE8, 0X55 }, /* -27.5db */
- { 0X02, 0XDB, 0XD8, 0XAD }, /* -27.0db */
- { 0X03, 0X07, 0X36, 0X05 }, /* -26.5db */
- { 0X03, 0X35, 0X25, 0X29 }, /* -26.0db */
- { 0X03, 0X65, 0XCD, 0X13 }, /* -25.5db */
- { 0X03, 0X99, 0X57, 0X0C }, /* -25.0db */
- { 0X03, 0XCF, 0XEE, 0XCF }, /* -24.5db */
- { 0X04, 0X09, 0XC2, 0XB0 }, /* -24.0db */
- { 0X04, 0X47, 0X03, 0XC1 }, /* -23.5db */
- { 0X04, 0X87, 0XE5, 0XFB }, /* -23.0db */
- { 0X04, 0XCC, 0XA0, 0X6D }, /* -22.5db */
- { 0X05, 0X15, 0X6D, 0X68 }, /* -22.0db */
- { 0X05, 0X62, 0X8A, 0XB3 }, /* -21.5db */
- { 0X05, 0XB4, 0X39, 0XBC }, /* -21.0db */
- { 0X06, 0X0A, 0XBF, 0XD4 }, /* -20.5db */
- { 0X06, 0X66, 0X66, 0X66 }, /* -20.0db */
- { 0X06, 0XC7, 0X7B, 0X36 }, /* -19.5db */
- { 0X07, 0X2E, 0X50, 0XA6 }, /* -19.0db */
- { 0X07, 0X9B, 0X3D, 0XF6 }, /* -18.5db */
- { 0X08, 0X0E, 0X9F, 0X96 }, /* -18.0db */
- { 0X08, 0X88, 0XD7, 0X6D }, /* -17.5db */
- { 0X09, 0X0A, 0X4D, 0X2F }, /* -17.0db */
- { 0X09, 0X93, 0X6E, 0XB8 }, /* -16.5db */
- { 0X0A, 0X24, 0XB0, 0X62 }, /* -16.0db */
- { 0X0A, 0XBE, 0X8D, 0X70 }, /* -15.5db */
- { 0X0B, 0X61, 0X88, 0X71 }, /* -15.0db */
- { 0X0C, 0X0E, 0X2B, 0XB0 }, /* -14.5db */
- { 0X0C, 0XC5, 0X09, 0XAB }, /* -14.0db */
- { 0X0D, 0X86, 0XBD, 0X8D }, /* -13.5db */
- { 0X0E, 0X53, 0XEB, 0XB3 }, /* -13.0db */
- { 0X0F, 0X2D, 0X42, 0X38 }, /* -12.5db */
- { 0X10, 0X13, 0X79, 0X87 }, /* -12.0db */
- { 0X11, 0X07, 0X54, 0XF9 }, /* -11.5db */
- { 0X12, 0X09, 0XA3, 0X7A }, /* -11.0db */
- { 0X13, 0X1B, 0X40, 0X39 }, /* -10.5db */
- { 0X14, 0X3D, 0X13, 0X62 }, /* -10.0db */
- { 0X15, 0X70, 0X12, 0XE1 }, /* -9.5db */
- { 0X16, 0XB5, 0X43, 0X37 }, /* -9.0db */
- { 0X18, 0X0D, 0XB8, 0X54 }, /* -8.5db */
- { 0X19, 0X7A, 0X96, 0X7F }, /* -8.0db */
- { 0X1A, 0XFD, 0X13, 0X54 }, /* -7.5db */
- { 0X1C, 0X96, 0X76, 0XC6 }, /* -7.0db */
- { 0X1E, 0X48, 0X1C, 0X37 }, /* -6.5db */
- { 0X20, 0X13, 0X73, 0X9E }, /* -6.0db */
- { 0X21, 0XFA, 0X02, 0XBF }, /* -5.5db */
- { 0X23, 0XFD, 0X66, 0X78 }, /* -5.0db */
- { 0X26, 0X1F, 0X54, 0X1C }, /* -4.5db */
- { 0X28, 0X61, 0X9A, 0XE9 }, /* -4.0db */
- { 0X2A, 0XC6, 0X25, 0X91 }, /* -3.5db */
- { 0X2D, 0X4E, 0XFB, 0XD5 }, /* -3.0db */
- { 0X2F, 0XFE, 0X44, 0X48 }, /* -2.5db */
- { 0X32, 0XD6, 0X46, 0X17 }, /* -2.0db */
- { 0X35, 0XD9, 0X6B, 0X02 }, /* -1.5db */
- { 0X39, 0X0A, 0X41, 0X5F }, /* -1.0db */
- { 0X3C, 0X6B, 0X7E, 0X4F }, /* -0.5db */
- { 0X40, 0X00, 0X00, 0X00 }, /* 0.0db */
- { 0X43, 0XCA, 0XD0, 0X22 }, /* 0.5db */
- { 0X47, 0XCF, 0X26, 0X7D }, /* 1.0db */
- { 0X4C, 0X10, 0X6B, 0XA5 }, /* 1.5db */
- { 0X50, 0X92, 0X3B, 0XE3 }, /* 2.0db */
- { 0X55, 0X58, 0X6A, 0X46 }, /* 2.5db */
- { 0X5A, 0X67, 0X03, 0XDF }, /* 3.0db */
- { 0X5F, 0XC2, 0X53, 0X32 }, /* 3.5db */
- { 0X65, 0X6E, 0XE3, 0XDB }, /* 4.0db */
- { 0X6B, 0X71, 0X86, 0X68 }, /* 4.5db */
- { 0X71, 0XCF, 0X54, 0X71 }, /* 5.0db */
- { 0X78, 0X8D, 0XB4, 0XE9 }, /* 5.5db */
- { 0XFF, 0XFF, 0XFF, 0XFF }, /* 6.0db */
-};
#endif
diff --git a/include/sound/tas2781.h b/include/sound/tas2781.h
index 18161d02a96f..8cd6da0480b7 100644
--- a/include/sound/tas2781.h
+++ b/include/sound/tas2781.h
@@ -49,12 +49,59 @@
/*I2C Checksum */
#define TASDEVICE_I2CChecksum TASDEVICE_REG(0x0, 0x0, 0x7E)
+/* XM_340 */
+#define TASDEVICE_XM_A1_REG TASDEVICE_REG(0x64, 0x63, 0x3c)
+/* XM_341 */
+#define TASDEVICE_XM_A2_REG TASDEVICE_REG(0x64, 0x63, 0x38)
+
/* Volume control */
#define TAS2563_DVC_LVL TASDEVICE_REG(0x00, 0x02, 0x0C)
#define TAS2781_DVC_LVL TASDEVICE_REG(0x0, 0x0, 0x1A)
#define TAS2781_AMP_LEVEL TASDEVICE_REG(0x0, 0x0, 0x03)
#define TAS2781_AMP_LEVEL_MASK GENMASK(5, 1)
+#define TAS2563_IDLE TASDEVICE_REG(0x00, 0x00, 0x3e)
+#define TAS2563_PRM_R0_REG TASDEVICE_REG(0x00, 0x0f, 0x34)
+
+#define TAS2563_RUNTIME_RE_REG_TF TASDEVICE_REG(0x64, 0x02, 0x70)
+#define TAS2563_RUNTIME_RE_REG TASDEVICE_REG(0x64, 0x02, 0x48)
+
+#define TAS2563_PRM_ENFF_REG TASDEVICE_REG(0x00, 0x0d, 0x54)
+#define TAS2563_PRM_DISTCK_REG TASDEVICE_REG(0x00, 0x0d, 0x58)
+#define TAS2563_PRM_TE_SCTHR_REG TASDEVICE_REG(0x00, 0x0f, 0x60)
+#define TAS2563_PRM_PLT_FLAG_REG TASDEVICE_REG(0x00, 0x0d, 0x74)
+#define TAS2563_PRM_SINEGAIN_REG TASDEVICE_REG(0x00, 0x0d, 0x7c)
+/* prm_Int_B0 */
+#define TAS2563_TE_TA1_REG TASDEVICE_REG(0x00, 0x10, 0x0c)
+/* prm_Int_A1 */
+#define TAS2563_TE_TA1_AT_REG TASDEVICE_REG(0x00, 0x10, 0x10)
+/* prm_TE_Beta */
+#define TAS2563_TE_TA2_REG TASDEVICE_REG(0x00, 0x0f, 0x64)
+/* prm_TE_Beta1 */
+#define TAS2563_TE_AT_REG TASDEVICE_REG(0x00, 0x0f, 0x68)
+/* prm_TE_1_Beta1 */
+#define TAS2563_TE_DT_REG TASDEVICE_REG(0x00, 0x0f, 0x70)
+
+#define TAS2781_PRM_INT_MASK_REG TASDEVICE_REG(0x00, 0x00, 0x3b)
+#define TAS2781_PRM_CLK_CFG_REG TASDEVICE_REG(0x00, 0x00, 0x5c)
+#define TAS2781_PRM_RSVD_REG TASDEVICE_REG(0x00, 0x01, 0x19)
+#define TAS2781_PRM_TEST_57_REG TASDEVICE_REG(0x00, 0xfd, 0x39)
+#define TAS2781_PRM_TEST_62_REG TASDEVICE_REG(0x00, 0xfd, 0x3e)
+#define TAS2781_PRM_PVDD_UVLO_REG TASDEVICE_REG(0x00, 0x00, 0x71)
+#define TAS2781_PRM_CHNL_0_REG TASDEVICE_REG(0x00, 0x00, 0x03)
+#define TAS2781_PRM_NG_CFG0_REG TASDEVICE_REG(0x00, 0x00, 0x35)
+#define TAS2781_PRM_IDLE_CH_DET_REG TASDEVICE_REG(0x00, 0x00, 0x66)
+#define TAS2781_PRM_PLT_FLAG_REG TASDEVICE_REG(0x00, 0x14, 0x38)
+#define TAS2781_PRM_SINEGAIN_REG TASDEVICE_REG(0x00, 0x14, 0x40)
+#define TAS2781_PRM_SINEGAIN2_REG TASDEVICE_REG(0x00, 0x14, 0x44)
+
+#define TAS2781_TEST_UNLOCK_REG TASDEVICE_REG(0x00, 0xFD, 0x0D)
+#define TAS2781_TEST_PAGE_UNLOCK 0x0D
+
+#define TAS2781_RUNTIME_LATCH_RE_REG TASDEVICE_REG(0x00, 0x00, 0x49)
+#define TAS2781_RUNTIME_RE_REG_TF TASDEVICE_REG(0x64, 0x62, 0x48)
+#define TAS2781_RUNTIME_RE_REG TASDEVICE_REG(0x64, 0x63, 0x44)
+
#define TASDEVICE_CMD_SING_W 0x1
#define TASDEVICE_CMD_BURST 0x2
#define TASDEVICE_CMD_DELAY 0x3
@@ -70,7 +117,15 @@ enum device_catlog_id {
OTHERS
};
+struct bulk_reg_val {
+ int reg;
+ unsigned char val[4];
+ unsigned char val_len;
+ bool is_locked;
+};
+
struct tasdevice {
+ struct bulk_reg_val *cali_data_backup;
struct tasdevice_fw *cali_data_fmw;
unsigned int dev_addr;
unsigned int err_code;
@@ -81,19 +136,23 @@ struct tasdevice {
bool is_loaderr;
};
-struct tasdevice_irqinfo {
- int irq_gpio;
- int irq;
+struct cali_reg {
+ unsigned int r0_reg;
+ unsigned int r0_low_reg;
+ unsigned int invr0_reg;
+ unsigned int pow_reg;
+ unsigned int tlimit_reg;
};
struct calidata {
unsigned char *data;
unsigned long total_sz;
+ struct cali_reg cali_reg_array;
+ unsigned int cali_dat_sz_per_dev;
};
struct tasdevice_priv {
struct tasdevice tasdevice[TASDEVICE_MAX_CHANNELS];
- struct tasdevice_irqinfo irq_info;
struct tasdevice_rca rcabin;
struct calidata cali_data;
struct tasdevice_fw *fmw;
@@ -115,6 +174,7 @@ struct tasdevice_priv {
unsigned int chip_id;
unsigned int sysclk;
+ int irq;
int cur_prog;
int cur_conf;
int fw_state;
@@ -124,6 +184,7 @@ struct tasdevice_priv {
bool force_fwload_status;
bool playback_started;
bool isacpi;
+ bool is_user_space_calidata;
unsigned int global_addr;
int (*fw_parse_variable_header)(struct tasdevice_priv *tas_priv,
@@ -150,6 +211,8 @@ int tasdevice_init(struct tasdevice_priv *tas_priv);
void tasdevice_remove(struct tasdevice_priv *tas_priv);
int tasdevice_save_calibration(struct tasdevice_priv *tas_priv);
void tasdevice_apply_calibration(struct tasdevice_priv *tas_priv);
+int tasdev_chn_switch(struct tasdevice_priv *tas_priv,
+ unsigned short chn);
int tasdevice_dev_read(struct tasdevice_priv *tas_priv,
unsigned short chn, unsigned int reg, unsigned int *value);
int tasdevice_dev_write(struct tasdevice_priv *tas_priv,
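The new runtime and calibration registers are addressed through the existing TASDEVICE_REG(book, page, reg) encoding and read back through the existing accessors; a hypothetical read sketch (channel index is illustrative):

#include <sound/tas2781.h>

static int example_read_re(struct tasdevice_priv *priv, unsigned int *re)
{
	return tasdevice_dev_read(priv, 0, TAS2781_RUNTIME_RE_REG, re);
}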
diff --git a/include/sound/ump.h b/include/sound/ump.h
index 91238dabe307..532c2c3ea28e 100644
--- a/include/sound/ump.h
+++ b/include/sound/ump.h
@@ -13,6 +13,15 @@ struct snd_ump_ops;
struct ump_cvt_to_ump;
struct snd_seq_ump_ops;
+struct snd_ump_group {
+ int group; /* group index (0-based) */
+ unsigned int dir_bits; /* directions */
+ bool active; /* activeness */
+ bool valid; /* valid group (referred by blocks) */
+ bool is_midi1; /* belongs to a MIDI1 FB */
+ char name[64]; /* group name */
+};
+
struct snd_ump_endpoint {
struct snd_rawmidi core; /* raw UMP access */
@@ -41,6 +50,8 @@ struct snd_ump_endpoint {
struct mutex open_mutex;
+ struct snd_ump_group groups[SNDRV_UMP_MAX_GROUPS]; /* table of groups */
+
#if IS_ENABLED(CONFIG_SND_UMP_LEGACY_RAWMIDI)
spinlock_t legacy_locks[2];
struct snd_rawmidi *legacy_rmidi;
@@ -112,6 +123,7 @@ static inline int snd_ump_attach_legacy_rawmidi(struct snd_ump_endpoint *ump,
int snd_ump_receive_ump_val(struct snd_ump_endpoint *ump, u32 val);
int snd_ump_switch_protocol(struct snd_ump_endpoint *ump, unsigned int protocol);
+void snd_ump_update_group_attrs(struct snd_ump_endpoint *ump);
/*
* Some definitions for UMP
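Code holding a UMP endpoint can refresh and then walk the new per-group table; a sketch counting currently active groups (the helper itself is illustrative):

#include <sound/ump.h>

static int example_count_active(struct snd_ump_endpoint *ump)
{
	int i, n = 0;

	snd_ump_update_group_attrs(ump);
	for (i = 0; i < SNDRV_UMP_MAX_GROUPS; i++)
		if (ump->groups[i].active)
			n++;
	return n;
}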
diff --git a/include/sound/vx_core.h b/include/sound/vx_core.h
index 1ddd3036bdfc..ca87fa6a8135 100644
--- a/include/sound/vx_core.h
+++ b/include/sound/vx_core.h
@@ -155,7 +155,6 @@ struct vx_core {
unsigned int chip_status;
unsigned int pcm_running;
- struct device *dev;
struct snd_hwdep *hwdep;
struct vx_rmh irq_rmh; /* RMH used in interrupts */
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 739df993aa5e..4063a701081b 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -3,7 +3,7 @@
#define TARGET_CORE_BACKEND_H
#include <linux/types.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <target/target_core_base.h>
#define TRANSPORT_FLAG_PASSTHROUGH 0x1
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
index 202fc3680c36..6696dbcc2b96 100644
--- a/include/trace/events/asoc.h
+++ b/include/trace/events/asoc.h
@@ -8,6 +8,7 @@
#include <linux/ktime.h>
#include <linux/tracepoint.h>
#include <sound/jack.h>
+#include <sound/pcm.h>
#define DAPM_DIRECT "(direct)"
#define DAPM_ARROW(dir) (((dir) == SND_SOC_DAPM_DIR_OUT) ? "->" : "<-")
@@ -212,7 +213,7 @@ TRACE_EVENT(snd_soc_dapm_connected,
),
TP_printk("%s: found %d paths",
- __entry->stream ? "capture" : "playback", __entry->paths)
+ snd_pcm_direction_name(__entry->stream), __entry->paths)
);
TRACE_EVENT(snd_soc_jack_irq,
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 0a523023bdcc..af6b3827fb1d 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -674,10 +674,10 @@ TRACE_EVENT(btrfs_finish_ordered_extent,
DECLARE_EVENT_CLASS(btrfs__writepage,
- TP_PROTO(const struct page *page, const struct inode *inode,
+ TP_PROTO(const struct folio *folio, const struct inode *inode,
const struct writeback_control *wbc),
- TP_ARGS(page, inode, wbc),
+ TP_ARGS(folio, inode, wbc),
TP_STRUCT__entry_btrfs(
__field( u64, ino )
@@ -695,7 +695,7 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
__entry->ino = btrfs_ino(BTRFS_I(inode));
- __entry->index = page->index;
+ __entry->index = folio->index;
__entry->nr_to_write = wbc->nr_to_write;
__entry->pages_skipped = wbc->pages_skipped;
__entry->range_start = wbc->range_start;
@@ -721,12 +721,12 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
__entry->writeback_index)
);
-DEFINE_EVENT(btrfs__writepage, __extent_writepage,
+DEFINE_EVENT(btrfs__writepage, extent_writepage,
- TP_PROTO(const struct page *page, const struct inode *inode,
+ TP_PROTO(const struct folio *folio, const struct inode *inode,
const struct writeback_control *wbc),
- TP_ARGS(page, inode, wbc)
+ TP_ARGS(folio, inode, wbc)
);
TRACE_EVENT(btrfs_writepage_end_io_hook,
@@ -1716,7 +1716,7 @@ DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
),
TP_fast_assign_btrfs(fs_info,
- __entry->bytenr = rec->bytenr,
+ __entry->bytenr = rec->bytenr;
__entry->num_bytes = rec->num_bytes;
),
@@ -1825,7 +1825,7 @@ TRACE_EVENT(qgroup_update_counters,
TRACE_EVENT(qgroup_update_reserve,
- TP_PROTO(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup,
+ TP_PROTO(const struct btrfs_fs_info *fs_info, const struct btrfs_qgroup *qgroup,
s64 diff, int type),
TP_ARGS(fs_info, qgroup, diff, type),
@@ -1851,7 +1851,7 @@ TRACE_EVENT(qgroup_update_reserve,
TRACE_EVENT(qgroup_meta_reserve,
- TP_PROTO(struct btrfs_root *root, s64 diff, int type),
+ TP_PROTO(const struct btrfs_root *root, s64 diff, int type),
TP_ARGS(root, diff, type),
@@ -1874,7 +1874,7 @@ TRACE_EVENT(qgroup_meta_reserve,
TRACE_EVENT(qgroup_meta_convert,
- TP_PROTO(struct btrfs_root *root, s64 diff),
+ TP_PROTO(const struct btrfs_root *root, s64 diff),
TP_ARGS(root, diff),
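Callers of the renamed writepage event now hand over the folio directly; a hedged caller sketch (the wrapper is illustrative, not the actual btrfs call site):

#include <trace/events/btrfs.h>

static void example_trace(const struct folio *folio, const struct inode *inode,
			  const struct writeback_control *wbc)
{
	trace_extent_writepage(folio, inode, wbc);
}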
diff --git a/include/trace/events/dma.h b/include/trace/events/dma.h
new file mode 100644
index 000000000000..569f86a44aaa
--- /dev/null
+++ b/include/trace/events/dma.h
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dma
+
+#if !defined(_TRACE_DMA_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_DMA_H
+
+#include <linux/tracepoint.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <trace/events/mmflags.h>
+
+TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
+TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
+TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
+TRACE_DEFINE_ENUM(DMA_NONE);
+
+#define decode_dma_data_direction(dir) \
+ __print_symbolic(dir, \
+ { DMA_BIDIRECTIONAL, "BIDIRECTIONAL" }, \
+ { DMA_TO_DEVICE, "TO_DEVICE" }, \
+ { DMA_FROM_DEVICE, "FROM_DEVICE" }, \
+ { DMA_NONE, "NONE" })
+
+#define decode_dma_attrs(attrs) \
+ __print_flags(attrs, "|", \
+ { DMA_ATTR_WEAK_ORDERING, "WEAK_ORDERING" }, \
+ { DMA_ATTR_WRITE_COMBINE, "WRITE_COMBINE" }, \
+ { DMA_ATTR_NO_KERNEL_MAPPING, "NO_KERNEL_MAPPING" }, \
+ { DMA_ATTR_SKIP_CPU_SYNC, "SKIP_CPU_SYNC" }, \
+ { DMA_ATTR_FORCE_CONTIGUOUS, "FORCE_CONTIGUOUS" }, \
+ { DMA_ATTR_ALLOC_SINGLE_PAGES, "ALLOC_SINGLE_PAGES" }, \
+ { DMA_ATTR_NO_WARN, "NO_WARN" }, \
+ { DMA_ATTR_PRIVILEGED, "PRIVILEGED" })
+
+DECLARE_EVENT_CLASS(dma_map,
+ TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, phys_addr, dma_addr, size, dir, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __field(u64, phys_addr)
+ __field(u64, dma_addr)
+ __field(size_t, size)
+ __field(enum dma_data_direction, dir)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __entry->phys_addr = phys_addr;
+ __entry->dma_addr = dma_addr;
+ __entry->size = size;
+ __entry->dir = dir;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dir=%s dma_addr=%llx size=%zu phys_addr=%llx attrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __entry->dma_addr,
+ __entry->size,
+ __entry->phys_addr,
+ decode_dma_attrs(__entry->attrs))
+);
+
+DEFINE_EVENT(dma_map, dma_map_page,
+ TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, phys_addr, dma_addr, size, dir, attrs));
+
+DEFINE_EVENT(dma_map, dma_map_resource,
+ TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, phys_addr, dma_addr, size, dir, attrs));
+
+DECLARE_EVENT_CLASS(dma_unmap,
+ TP_PROTO(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, addr, size, dir, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __field(u64, addr)
+ __field(size_t, size)
+ __field(enum dma_data_direction, dir)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __entry->addr = addr;
+ __entry->size = size;
+ __entry->dir = dir;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dir=%s dma_addr=%llx size=%zu attrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __entry->addr,
+ __entry->size,
+ decode_dma_attrs(__entry->attrs))
+);
+
+DEFINE_EVENT(dma_unmap, dma_unmap_page,
+ TP_PROTO(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, addr, size, dir, attrs));
+
+DEFINE_EVENT(dma_unmap, dma_unmap_resource,
+ TP_PROTO(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, addr, size, dir, attrs));
+
+TRACE_EVENT(dma_alloc,
+ TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr,
+ size_t size, gfp_t flags, unsigned long attrs),
+ TP_ARGS(dev, virt_addr, dma_addr, size, flags, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __field(u64, phys_addr)
+ __field(u64, dma_addr)
+ __field(size_t, size)
+ __field(gfp_t, flags)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __entry->phys_addr = virt_to_phys(virt_addr);
+ __entry->dma_addr = dma_addr;
+ __entry->size = size;
+ __entry->flags = flags;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dma_addr=%llx size=%zu phys_addr=%llx flags=%s attrs=%s",
+ __get_str(device),
+ __entry->dma_addr,
+ __entry->size,
+ __entry->phys_addr,
+ show_gfp_flags(__entry->flags),
+ decode_dma_attrs(__entry->attrs))
+);
+
+TRACE_EVENT(dma_free,
+ TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr,
+ size_t size, unsigned long attrs),
+ TP_ARGS(dev, virt_addr, dma_addr, size, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __field(u64, phys_addr)
+ __field(u64, dma_addr)
+ __field(size_t, size)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __entry->phys_addr = virt_to_phys(virt_addr);
+ __entry->dma_addr = dma_addr;
+ __entry->size = size;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dma_addr=%llx size=%zu phys_addr=%llx attrs=%s",
+ __get_str(device),
+ __entry->dma_addr,
+ __entry->size,
+ __entry->phys_addr,
+ decode_dma_attrs(__entry->attrs))
+);
+
+TRACE_EVENT(dma_map_sg,
+ TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
+ int ents, enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, sgl, nents, ents, dir, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __dynamic_array(u64, phys_addrs, nents)
+ __dynamic_array(u64, dma_addrs, ents)
+ __dynamic_array(unsigned int, lengths, ents)
+ __field(enum dma_data_direction, dir)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ struct scatterlist *sg;
+ int i;
+
+ __assign_str(device);
+ for_each_sg(sgl, sg, nents, i)
+ ((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg);
+ for_each_sg(sgl, sg, ents, i) {
+ ((u64 *)__get_dynamic_array(dma_addrs))[i] =
+ sg_dma_address(sg);
+ ((unsigned int *)__get_dynamic_array(lengths))[i] =
+ sg_dma_len(sg);
+ }
+ __entry->dir = dir;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dir=%s dma_addrs=%s sizes=%s phys_addrs=%s attrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __print_array(__get_dynamic_array(dma_addrs),
+ __get_dynamic_array_len(dma_addrs) /
+ sizeof(u64), sizeof(u64)),
+ __print_array(__get_dynamic_array(lengths),
+ __get_dynamic_array_len(lengths) /
+ sizeof(unsigned int), sizeof(unsigned int)),
+ __print_array(__get_dynamic_array(phys_addrs),
+ __get_dynamic_array_len(phys_addrs) /
+ sizeof(u64), sizeof(u64)),
+ decode_dma_attrs(__entry->attrs))
+);
+
+TRACE_EVENT(dma_unmap_sg,
+ TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
+ enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, sgl, nents, dir, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __dynamic_array(u64, addrs, nents)
+ __field(enum dma_data_direction, dir)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ struct scatterlist *sg;
+ int i;
+
+ __assign_str(device);
+ for_each_sg(sgl, sg, nents, i)
+ ((u64 *)__get_dynamic_array(addrs))[i] = sg_phys(sg);
+ __entry->dir = dir;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dir=%s phys_addrs=%s attrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __print_array(__get_dynamic_array(addrs),
+ __get_dynamic_array_len(addrs) /
+ sizeof(u64), sizeof(u64)),
+ decode_dma_attrs(__entry->attrs))
+);
+
+DECLARE_EVENT_CLASS(dma_sync_single,
+ TP_PROTO(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir),
+ TP_ARGS(dev, dma_addr, size, dir),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __field(u64, dma_addr)
+ __field(size_t, size)
+ __field(enum dma_data_direction, dir)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __entry->dma_addr = dma_addr;
+ __entry->size = size;
+ __entry->dir = dir;
+ ),
+
+ TP_printk("%s dir=%s dma_addr=%llx size=%zu",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __entry->dma_addr,
+ __entry->size)
+);
+
+DEFINE_EVENT(dma_sync_single, dma_sync_single_for_cpu,
+ TP_PROTO(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir),
+ TP_ARGS(dev, dma_addr, size, dir));
+
+DEFINE_EVENT(dma_sync_single, dma_sync_single_for_device,
+ TP_PROTO(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir),
+ TP_ARGS(dev, dma_addr, size, dir));
+
+DECLARE_EVENT_CLASS(dma_sync_sg,
+ TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
+ enum dma_data_direction dir),
+ TP_ARGS(dev, sgl, nents, dir),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __dynamic_array(u64, dma_addrs, nents)
+ __dynamic_array(unsigned int, lengths, nents)
+ __field(enum dma_data_direction, dir)
+ ),
+
+ TP_fast_assign(
+ struct scatterlist *sg;
+ int i;
+
+ __assign_str(device);
+ for_each_sg(sgl, sg, nents, i) {
+ ((u64 *)__get_dynamic_array(dma_addrs))[i] =
+ sg_dma_address(sg);
+ ((unsigned int *)__get_dynamic_array(lengths))[i] =
+ sg_dma_len(sg);
+ }
+ __entry->dir = dir;
+ ),
+
+ TP_printk("%s dir=%s dma_addrs=%s sizes=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __print_array(__get_dynamic_array(dma_addrs),
+ __get_dynamic_array_len(dma_addrs) /
+ sizeof(u64), sizeof(u64)),
+ __print_array(__get_dynamic_array(lengths),
+ __get_dynamic_array_len(lengths) /
+ sizeof(unsigned int), sizeof(unsigned int)))
+);
+
+DEFINE_EVENT(dma_sync_sg, dma_sync_sg_for_cpu,
+ TP_PROTO(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir),
+ TP_ARGS(dev, sg, nents, dir));
+
+DEFINE_EVENT(dma_sync_sg, dma_sync_sg_for_device,
+ TP_PROTO(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir),
+ TP_ARGS(dev, sg, nents, dir));
+
+#endif /* _TRACE_DMA_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
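
Editor's note: the tracepoints above are generated by the usual TRACE_EVENT/DEFINE_EVENT machinery, so each DEFINE_EVENT(dma_map, dma_map_page, ...) yields a trace_dma_map_page() call for the DMA mapping core to emit (exactly one .c file must define CREATE_TRACE_POINTS before including the header). A minimal sketch of a call site follows; the wrapper function and its name are hypothetical and not part of this patch.

/* Illustrative only: emit the new tracepoint after a successful streaming
 * mapping. example_map_page() is a hypothetical wrapper, not from the patch. */
#include <linux/dma-mapping.h>
#include <trace/events/dma.h>

static dma_addr_t example_map_page(struct device *dev, struct page *page,
				   size_t offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	dma_addr_t addr = dma_map_page_attrs(dev, page, offset, size, dir, attrs);

	if (!dma_mapping_error(dev, addr))
		trace_dma_map_page(dev, page_to_phys(page) + offset, addr,
				   size, dir, attrs);
	return addr;
}
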
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index cc5e9b7b2b44..156908641e68 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -91,7 +91,6 @@ TRACE_DEFINE_ENUM(ES_REFERENCED_B);
#define show_falloc_mode(mode) __print_flags(mode, "|", \
{ FALLOC_FL_KEEP_SIZE, "KEEP_SIZE"}, \
{ FALLOC_FL_PUNCH_HOLE, "PUNCH_HOLE"}, \
- { FALLOC_FL_NO_HIDE_STALE, "NO_HIDE_STALE"}, \
{ FALLOC_FL_COLLAPSE_RANGE, "COLLAPSE_RANGE"}, \
{ FALLOC_FL_ZERO_RANGE, "ZERO_RANGE"})
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index ed794b5fefbe..2851c823095b 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -139,7 +139,8 @@ TRACE_DEFINE_ENUM(EX_BLOCK_AGE);
{ CP_NODE_NEED_CP, "node needs cp" }, \
{ CP_FASTBOOT_MODE, "fastboot mode" }, \
{ CP_SPEC_LOG_NUM, "log type is 2" }, \
- { CP_RECOVER_DIR, "dir needs recovery" })
+ { CP_RECOVER_DIR, "dir needs recovery" }, \
+ { CP_XATTR_DIR, "dir's xattr updated" })
#define show_shutdown_mode(type) \
__print_symbolic(type, \
diff --git a/include/trace/events/filemap.h b/include/trace/events/filemap.h
index 46c89c1e460c..f48fe637bfd2 100644
--- a/include/trace/events/filemap.h
+++ b/include/trace/events/filemap.h
@@ -56,6 +56,90 @@ DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_add_to_page_cache,
TP_ARGS(folio)
);
+DECLARE_EVENT_CLASS(mm_filemap_op_page_cache_range,
+
+ TP_PROTO(
+ struct address_space *mapping,
+ pgoff_t index,
+ pgoff_t last_index
+ ),
+
+ TP_ARGS(mapping, index, last_index),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, i_ino)
+ __field(dev_t, s_dev)
+ __field(unsigned long, index)
+ __field(unsigned long, last_index)
+ ),
+
+ TP_fast_assign(
+ __entry->i_ino = mapping->host->i_ino;
+ if (mapping->host->i_sb)
+ __entry->s_dev =
+ mapping->host->i_sb->s_dev;
+ else
+ __entry->s_dev = mapping->host->i_rdev;
+ __entry->index = index;
+ __entry->last_index = last_index;
+ ),
+
+ TP_printk(
+ "dev=%d:%d ino=%lx ofs=%lld-%lld",
+ MAJOR(__entry->s_dev),
+ MINOR(__entry->s_dev), __entry->i_ino,
+ ((loff_t)__entry->index) << PAGE_SHIFT,
+ ((((loff_t)__entry->last_index + 1) << PAGE_SHIFT) - 1)
+ )
+);
+
+DEFINE_EVENT(mm_filemap_op_page_cache_range, mm_filemap_get_pages,
+ TP_PROTO(
+ struct address_space *mapping,
+ pgoff_t index,
+ pgoff_t last_index
+ ),
+ TP_ARGS(mapping, index, last_index)
+);
+
+DEFINE_EVENT(mm_filemap_op_page_cache_range, mm_filemap_map_pages,
+ TP_PROTO(
+ struct address_space *mapping,
+ pgoff_t index,
+ pgoff_t last_index
+ ),
+ TP_ARGS(mapping, index, last_index)
+);
+
+TRACE_EVENT(mm_filemap_fault,
+ TP_PROTO(struct address_space *mapping, pgoff_t index),
+
+ TP_ARGS(mapping, index),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, i_ino)
+ __field(dev_t, s_dev)
+ __field(unsigned long, index)
+ ),
+
+ TP_fast_assign(
+ __entry->i_ino = mapping->host->i_ino;
+ if (mapping->host->i_sb)
+ __entry->s_dev =
+ mapping->host->i_sb->s_dev;
+ else
+ __entry->s_dev = mapping->host->i_rdev;
+ __entry->index = index;
+ ),
+
+ TP_printk(
+ "dev=%d:%d ino=%lx ofs=%lld",
+ MAJOR(__entry->s_dev),
+ MINOR(__entry->s_dev), __entry->i_ino,
+ ((loff_t)__entry->index) << PAGE_SHIFT
+ )
+);
+
TRACE_EVENT(filemap_set_wb_err,
TP_PROTO(struct address_space *mapping, errseq_t eseq),
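
Editor's note: the new mm_filemap events take just the address_space and page-cache indices, which the read and fault paths already have at hand. A minimal emit-site sketch (function name and locals are hypothetical, not taken from this patch):

/* Illustrative only: trace a page-cache fault and a read range. */
#include <linux/fs.h>
#include <linux/mm.h>
#include <trace/events/filemap.h>

static void example_trace(struct vm_fault *vmf, pgoff_t index, pgoff_t last)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;

	trace_mm_filemap_fault(mapping, vmf->pgoff);
	trace_mm_filemap_get_pages(mapping, index, last); /* read covering [index, last] */
}
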
diff --git a/include/trace/events/firewire.h b/include/trace/events/firewire.h
index b108176deb22..ad0e0cf82b9c 100644
--- a/include/trace/events/firewire.h
+++ b/include/trace/events/firewire.h
@@ -830,13 +830,13 @@ TRACE_EVENT_CONDITION(isoc_inbound_multiple_queue,
#ifndef show_cause
enum fw_iso_context_completions_cause {
FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH = 0,
- FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ,
+ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT,
FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW,
};
#define show_cause(cause) \
__print_symbolic(cause, \
{ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH, "FLUSH" }, \
- { FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ, "IRQ" }, \
+ { FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT, "INTERRUPT" }, \
{ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW, "HEADER_OVERFLOW" } \
)
#endif
diff --git a/include/trace/events/intel_ifs.h b/include/trace/events/intel_ifs.h
index 0d88ebf2c980..70323acde1de 100644
--- a/include/trace/events/intel_ifs.h
+++ b/include/trace/events/intel_ifs.h
@@ -35,6 +35,33 @@ TRACE_EVENT(ifs_status,
__entry->status)
);
+TRACE_EVENT(ifs_sbaf,
+
+ TP_PROTO(int batch, union ifs_sbaf activate, union ifs_sbaf_status status),
+
+ TP_ARGS(batch, activate, status),
+
+ TP_STRUCT__entry(
+ __field( u64, status )
+ __field( int, batch )
+ __field( u16, bundle )
+ __field( u16, pgm )
+ ),
+
+ TP_fast_assign(
+ __entry->status = status.data;
+ __entry->batch = batch;
+ __entry->bundle = activate.bundle_idx;
+ __entry->pgm = activate.pgm_idx;
+ ),
+
+ TP_printk("batch: 0x%.2x, bundle_idx: 0x%.4x, pgm_idx: 0x%.4x, status: 0x%.16llx",
+ __entry->batch,
+ __entry->bundle,
+ __entry->pgm,
+ __entry->status)
+);
+
#endif /* _TRACE_IFS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index b63d211bd141..bb8a59c6caa2 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -71,12 +71,6 @@
#define IF_HAVE_PG_MLOCK(_name)
#endif
-#ifdef CONFIG_ARCH_USES_PG_UNCACHED
-#define IF_HAVE_PG_UNCACHED(_name) ,{1UL << PG_##_name, __stringify(_name)}
-#else
-#define IF_HAVE_PG_UNCACHED(_name)
-#endif
-
#ifdef CONFIG_MEMORY_FAILURE
#define IF_HAVE_PG_HWPOISON(_name) ,{1UL << PG_##_name, __stringify(_name)}
#else
@@ -89,10 +83,16 @@
#define IF_HAVE_PG_IDLE(_name)
#endif
-#ifdef CONFIG_ARCH_USES_PG_ARCH_X
-#define IF_HAVE_PG_ARCH_X(_name) ,{1UL << PG_##_name, __stringify(_name)}
+#ifdef CONFIG_ARCH_USES_PG_ARCH_2
+#define IF_HAVE_PG_ARCH_2(_name) ,{1UL << PG_##_name, __stringify(_name)}
+#else
+#define IF_HAVE_PG_ARCH_2(_name)
+#endif
+
+#ifdef CONFIG_ARCH_USES_PG_ARCH_3
+#define IF_HAVE_PG_ARCH_3(_name) ,{1UL << PG_##_name, __stringify(_name)}
#else
-#define IF_HAVE_PG_ARCH_X(_name)
+#define IF_HAVE_PG_ARCH_3(_name)
#endif
#define DEF_PAGEFLAG_NAME(_name) { 1UL << PG_##_name, __stringify(_name) }
@@ -100,7 +100,6 @@
#define __def_pageflag_names \
DEF_PAGEFLAG_NAME(locked), \
DEF_PAGEFLAG_NAME(waiters), \
- DEF_PAGEFLAG_NAME(error), \
DEF_PAGEFLAG_NAME(referenced), \
DEF_PAGEFLAG_NAME(uptodate), \
DEF_PAGEFLAG_NAME(dirty), \
@@ -108,42 +107,31 @@
DEF_PAGEFLAG_NAME(active), \
DEF_PAGEFLAG_NAME(workingset), \
DEF_PAGEFLAG_NAME(owner_priv_1), \
+ DEF_PAGEFLAG_NAME(owner_2), \
DEF_PAGEFLAG_NAME(arch_1), \
DEF_PAGEFLAG_NAME(reserved), \
DEF_PAGEFLAG_NAME(private), \
DEF_PAGEFLAG_NAME(private_2), \
DEF_PAGEFLAG_NAME(writeback), \
DEF_PAGEFLAG_NAME(head), \
- DEF_PAGEFLAG_NAME(mappedtodisk), \
DEF_PAGEFLAG_NAME(reclaim), \
DEF_PAGEFLAG_NAME(swapbacked), \
DEF_PAGEFLAG_NAME(unevictable) \
IF_HAVE_PG_MLOCK(mlocked) \
-IF_HAVE_PG_UNCACHED(uncached) \
IF_HAVE_PG_HWPOISON(hwpoison) \
IF_HAVE_PG_IDLE(idle) \
IF_HAVE_PG_IDLE(young) \
-IF_HAVE_PG_ARCH_X(arch_2) \
-IF_HAVE_PG_ARCH_X(arch_3)
+IF_HAVE_PG_ARCH_2(arch_2) \
+IF_HAVE_PG_ARCH_3(arch_3)
#define show_page_flags(flags) \
(flags) ? __print_flags(flags, "|", \
__def_pageflag_names \
) : "none"
-#define DEF_PAGETYPE_NAME(_name) { PG_##_name, __stringify(_name) }
-
-#define __def_pagetype_names \
- DEF_PAGETYPE_NAME(slab), \
- DEF_PAGETYPE_NAME(hugetlb), \
- DEF_PAGETYPE_NAME(offline), \
- DEF_PAGETYPE_NAME(guard), \
- DEF_PAGETYPE_NAME(table), \
- DEF_PAGETYPE_NAME(buddy)
-
#if defined(CONFIG_X86)
#define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" }
-#elif defined(CONFIG_PPC)
+#elif defined(CONFIG_PPC64)
#define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" }
#elif defined(CONFIG_PARISC)
#define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" }
@@ -165,7 +153,7 @@ IF_HAVE_PG_ARCH_X(arch_3)
# define IF_HAVE_UFFD_MINOR(flag, name)
#endif
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) || defined(CONFIG_PPC32)
# define IF_HAVE_VM_DROPPABLE(flag, name) {flag, name},
#else
# define IF_HAVE_VM_DROPPABLE(flag, name)
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
index 606b4a0f92da..1d7c52821e55 100644
--- a/include/trace/events/netfs.h
+++ b/include/trace/events/netfs.h
@@ -20,6 +20,7 @@
EM(netfs_read_trace_expanded, "EXPANDED ") \
EM(netfs_read_trace_readahead, "READAHEAD") \
EM(netfs_read_trace_readpage, "READPAGE ") \
+ EM(netfs_read_trace_read_gaps, "READ-GAPS") \
EM(netfs_read_trace_prefetch_for_write, "PREFETCHW") \
E_(netfs_read_trace_write_begin, "WRITEBEGN")
@@ -33,13 +34,14 @@
#define netfs_rreq_origins \
EM(NETFS_READAHEAD, "RA") \
EM(NETFS_READPAGE, "RP") \
+ EM(NETFS_READ_GAPS, "RG") \
EM(NETFS_READ_FOR_WRITE, "RW") \
- EM(NETFS_COPY_TO_CACHE, "CC") \
+ EM(NETFS_DIO_READ, "DR") \
EM(NETFS_WRITEBACK, "WB") \
EM(NETFS_WRITETHROUGH, "WT") \
EM(NETFS_UNBUFFERED_WRITE, "UW") \
- EM(NETFS_DIO_READ, "DR") \
- E_(NETFS_DIO_WRITE, "DW")
+ EM(NETFS_DIO_WRITE, "DW") \
+ E_(NETFS_PGPRIV2_COPY_TO_CACHE, "2C")
#define netfs_rreq_traces \
EM(netfs_rreq_trace_assess, "ASSESS ") \
@@ -60,6 +62,7 @@
E_(netfs_rreq_trace_write_done, "WR-DONE")
#define netfs_sreq_sources \
+ EM(NETFS_SOURCE_UNKNOWN, "----") \
EM(NETFS_FILL_WITH_ZEROES, "ZERO") \
EM(NETFS_DOWNLOAD_FROM_SERVER, "DOWN") \
EM(NETFS_READ_FROM_CACHE, "READ") \
@@ -69,15 +72,25 @@
E_(NETFS_INVALID_WRITE, "INVL")
#define netfs_sreq_traces \
+ EM(netfs_sreq_trace_add_donations, "+DON ") \
+ EM(netfs_sreq_trace_added, "ADD ") \
+ EM(netfs_sreq_trace_clear, "CLEAR") \
EM(netfs_sreq_trace_discard, "DSCRD") \
+ EM(netfs_sreq_trace_donate_to_prev, "DON-P") \
+ EM(netfs_sreq_trace_donate_to_next, "DON-N") \
EM(netfs_sreq_trace_download_instead, "RDOWN") \
EM(netfs_sreq_trace_fail, "FAIL ") \
EM(netfs_sreq_trace_free, "FREE ") \
+ EM(netfs_sreq_trace_hit_eof, "EOF ") \
+ EM(netfs_sreq_trace_io_progress, "IO ") \
EM(netfs_sreq_trace_limited, "LIMIT") \
EM(netfs_sreq_trace_prepare, "PREP ") \
EM(netfs_sreq_trace_prep_failed, "PRPFL") \
- EM(netfs_sreq_trace_resubmit_short, "SHORT") \
+ EM(netfs_sreq_trace_progress, "PRGRS") \
+ EM(netfs_sreq_trace_reprep_failed, "REPFL") \
EM(netfs_sreq_trace_retry, "RETRY") \
+ EM(netfs_sreq_trace_short, "SHORT") \
+ EM(netfs_sreq_trace_split, "SPLIT") \
EM(netfs_sreq_trace_submit, "SUBMT") \
EM(netfs_sreq_trace_terminated, "TERM ") \
EM(netfs_sreq_trace_write, "WRITE") \
@@ -118,7 +131,7 @@
EM(netfs_sreq_trace_new, "NEW ") \
EM(netfs_sreq_trace_put_cancel, "PUT CANCEL ") \
EM(netfs_sreq_trace_put_clear, "PUT CLEAR ") \
- EM(netfs_sreq_trace_put_discard, "PUT DISCARD") \
+ EM(netfs_sreq_trace_put_consumed, "PUT CONSUME") \
EM(netfs_sreq_trace_put_done, "PUT DONE ") \
EM(netfs_sreq_trace_put_failed, "PUT FAILED ") \
EM(netfs_sreq_trace_put_merged, "PUT MERGED ") \
@@ -129,7 +142,6 @@
E_(netfs_sreq_trace_put_terminated, "PUT TERM ")
#define netfs_folio_traces \
- /* The first few correspond to enum netfs_how_to_modify */ \
EM(netfs_folio_is_uptodate, "mod-uptodate") \
EM(netfs_just_prefetch, "mod-prefetch") \
EM(netfs_whole_folio_modify, "mod-whole-f") \
@@ -139,8 +151,9 @@
EM(netfs_flush_content, "flush") \
EM(netfs_streaming_filled_page, "mod-streamw-f") \
EM(netfs_streaming_cont_filled_page, "mod-streamw-f+") \
- /* The rest are for writeback */ \
+ EM(netfs_folio_trace_abandon, "abandon") \
EM(netfs_folio_trace_cancel_copy, "cancel-copy") \
+ EM(netfs_folio_trace_cancel_store, "cancel-store") \
EM(netfs_folio_trace_clear, "clear") \
EM(netfs_folio_trace_clear_cc, "clear-cc") \
EM(netfs_folio_trace_clear_g, "clear-g") \
@@ -155,7 +168,12 @@
EM(netfs_folio_trace_mkwrite, "mkwrite") \
EM(netfs_folio_trace_mkwrite_plus, "mkwrite+") \
EM(netfs_folio_trace_not_under_wback, "!wback") \
+ EM(netfs_folio_trace_put, "put") \
+ EM(netfs_folio_trace_read, "read") \
+ EM(netfs_folio_trace_read_done, "read-done") \
EM(netfs_folio_trace_read_gaps, "read-gaps") \
+ EM(netfs_folio_trace_read_put, "read-put") \
+ EM(netfs_folio_trace_read_unlock, "read-unlock") \
EM(netfs_folio_trace_redirtied, "redirtied") \
EM(netfs_folio_trace_store, "store") \
EM(netfs_folio_trace_store_copy, "store-copy") \
@@ -168,6 +186,12 @@
EM(netfs_contig_trace_jump, "-->JUMP-->") \
E_(netfs_contig_trace_unlock, "Unlock")
+#define netfs_donate_traces \
+ EM(netfs_trace_donate_tail_to_prev, "tail-to-prev") \
+ EM(netfs_trace_donate_to_prev, "to-prev") \
+ EM(netfs_trace_donate_to_next, "to-next") \
+ E_(netfs_trace_donate_to_deferred_next, "defer-next")
+
#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
@@ -185,6 +209,7 @@ enum netfs_rreq_ref_trace { netfs_rreq_ref_traces } __mode(byte);
enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte);
enum netfs_folio_trace { netfs_folio_traces } __mode(byte);
enum netfs_collect_contig_trace { netfs_collect_contig_traces } __mode(byte);
+enum netfs_donate_trace { netfs_donate_traces } __mode(byte);
#endif
@@ -207,6 +232,7 @@ netfs_rreq_ref_traces;
netfs_sreq_ref_traces;
netfs_folio_traces;
netfs_collect_contig_traces;
+netfs_donate_traces;
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -227,6 +253,7 @@ TRACE_EVENT(netfs_read,
TP_STRUCT__entry(
__field(unsigned int, rreq )
__field(unsigned int, cookie )
+ __field(loff_t, i_size )
__field(loff_t, start )
__field(size_t, len )
__field(enum netfs_read_trace, what )
@@ -236,18 +263,19 @@ TRACE_EVENT(netfs_read,
TP_fast_assign(
__entry->rreq = rreq->debug_id;
__entry->cookie = rreq->cache_resources.debug_id;
+ __entry->i_size = rreq->i_size;
__entry->start = start;
__entry->len = len;
__entry->what = what;
__entry->netfs_inode = rreq->inode->i_ino;
),
- TP_printk("R=%08x %s c=%08x ni=%x s=%llx %zx",
+ TP_printk("R=%08x %s c=%08x ni=%x s=%llx l=%zx sz=%llx",
__entry->rreq,
__print_symbolic(__entry->what, netfs_read_traces),
__entry->cookie,
__entry->netfs_inode,
- __entry->start, __entry->len)
+ __entry->start, __entry->len, __entry->i_size)
);
TRACE_EVENT(netfs_rreq,
@@ -420,7 +448,8 @@ TRACE_EVENT(netfs_folio,
),
TP_fast_assign(
- __entry->ino = folio->mapping->host->i_ino;
+ struct address_space *__m = READ_ONCE(folio->mapping);
+ __entry->ino = __m ? __m->host->i_ino : 0;
__entry->why = why;
__entry->index = folio_index(folio);
__entry->nr = folio_nr_pages(folio);
@@ -513,33 +542,6 @@ TRACE_EVENT(netfs_collect,
__entry->start + __entry->len)
);
-TRACE_EVENT(netfs_collect_contig,
- TP_PROTO(const struct netfs_io_request *wreq, unsigned long long to,
- enum netfs_collect_contig_trace type),
-
- TP_ARGS(wreq, to, type),
-
- TP_STRUCT__entry(
- __field(unsigned int, wreq)
- __field(enum netfs_collect_contig_trace, type)
- __field(unsigned long long, contiguity)
- __field(unsigned long long, to)
- ),
-
- TP_fast_assign(
- __entry->wreq = wreq->debug_id;
- __entry->type = type;
- __entry->contiguity = wreq->contiguity;
- __entry->to = to;
- ),
-
- TP_printk("R=%08x %llx -> %llx %s",
- __entry->wreq,
- __entry->contiguity,
- __entry->to,
- __print_symbolic(__entry->type, netfs_collect_contig_traces))
- );
-
TRACE_EVENT(netfs_collect_sreq,
TP_PROTO(const struct netfs_io_request *wreq,
const struct netfs_io_subrequest *subreq),
@@ -611,7 +613,6 @@ TRACE_EVENT(netfs_collect_state,
__field(unsigned int, notes )
__field(unsigned long long, collected_to )
__field(unsigned long long, cleaned_to )
- __field(unsigned long long, contiguity )
),
TP_fast_assign(
@@ -619,12 +620,11 @@ TRACE_EVENT(netfs_collect_state,
__entry->notes = notes;
__entry->collected_to = collected_to;
__entry->cleaned_to = wreq->cleaned_to;
- __entry->contiguity = wreq->contiguity;
),
- TP_printk("R=%08x cto=%llx fto=%llx ctg=%llx n=%x",
+ TP_printk("R=%08x col=%llx cln=%llx n=%x",
__entry->wreq, __entry->collected_to,
- __entry->cleaned_to, __entry->contiguity,
+ __entry->cleaned_to,
__entry->notes)
);
@@ -681,6 +681,71 @@ TRACE_EVENT(netfs_collect_stream,
__entry->collected_to, __entry->front)
);
+TRACE_EVENT(netfs_progress,
+ TP_PROTO(const struct netfs_io_subrequest *subreq,
+ unsigned long long start, size_t avail, size_t part),
+
+ TP_ARGS(subreq, start, avail, part),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq)
+ __field(unsigned int, subreq)
+ __field(unsigned int, consumed)
+ __field(unsigned int, transferred)
+ __field(unsigned long long, f_start)
+ __field(unsigned int, f_avail)
+ __field(unsigned int, f_part)
+ __field(unsigned char, slot)
+ ),
+
+ TP_fast_assign(
+ __entry->rreq = subreq->rreq->debug_id;
+ __entry->subreq = subreq->debug_index;
+ __entry->consumed = subreq->consumed;
+ __entry->transferred = subreq->transferred;
+ __entry->f_start = start;
+ __entry->f_avail = avail;
+ __entry->f_part = part;
+ __entry->slot = subreq->curr_folioq_slot;
+ ),
+
+ TP_printk("R=%08x[%02x] s=%llx ct=%x/%x pa=%x/%x sl=%x",
+ __entry->rreq, __entry->subreq, __entry->f_start,
+ __entry->consumed, __entry->transferred,
+ __entry->f_part, __entry->f_avail, __entry->slot)
+ );
+
+TRACE_EVENT(netfs_donate,
+ TP_PROTO(const struct netfs_io_request *rreq,
+ const struct netfs_io_subrequest *from,
+ const struct netfs_io_subrequest *to,
+ size_t amount,
+ enum netfs_donate_trace trace),
+
+ TP_ARGS(rreq, from, to, amount, trace),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq)
+ __field(unsigned int, from)
+ __field(unsigned int, to)
+ __field(unsigned int, amount)
+ __field(enum netfs_donate_trace, trace)
+ ),
+
+ TP_fast_assign(
+ __entry->rreq = rreq->debug_id;
+ __entry->from = from->debug_index;
+ __entry->to = to ? to->debug_index : -1;
+ __entry->amount = amount;
+ __entry->trace = trace;
+ ),
+
+ TP_printk("R=%08x[%02x] -> [%02x] %s am=%x",
+ __entry->rreq, __entry->from, __entry->to,
+ __print_symbolic(__entry->trace, netfs_donate_traces),
+ __entry->amount)
+ );
+
#undef EM
#undef E_
#endif /* _TRACE_NETFS_H */
diff --git a/include/trace/events/oom.h b/include/trace/events/oom.h
index a42be4c8563b..9f0a5d1482c4 100644
--- a/include/trace/events/oom.h
+++ b/include/trace/events/oom.h
@@ -55,8 +55,8 @@ TRACE_EVENT(reclaim_retry_zone,
),
TP_fast_assign(
- __entry->node = zone_to_nid(zoneref->zone);
- __entry->zone_idx = zoneref->zone_idx;
+ __entry->node = zonelist_node_idx(zoneref);
+ __entry->zone_idx = zonelist_zone_idx(zoneref);
__entry->order = order;
__entry->reclaimable = reclaimable;
__entry->available = available;
diff --git a/include/trace/events/pwm.h b/include/trace/events/pwm.h
index 12b35e4ff917..8022701c446d 100644
--- a/include/trace/events/pwm.h
+++ b/include/trace/events/pwm.h
@@ -15,7 +15,8 @@ DECLARE_EVENT_CLASS(pwm,
TP_ARGS(pwm, state, err),
TP_STRUCT__entry(
- __field(struct pwm_device *, pwm)
+ __field(unsigned int, chipid)
+ __field(unsigned int, hwpwm)
__field(u64, period)
__field(u64, duty_cycle)
__field(enum pwm_polarity, polarity)
@@ -24,7 +25,8 @@ DECLARE_EVENT_CLASS(pwm,
),
TP_fast_assign(
- __entry->pwm = pwm;
+ __entry->chipid = pwm->chip->id;
+ __entry->hwpwm = pwm->hwpwm;
__entry->period = state->period;
__entry->duty_cycle = state->duty_cycle;
__entry->polarity = state->polarity;
@@ -32,8 +34,8 @@ DECLARE_EVENT_CLASS(pwm,
__entry->err = err;
),
- TP_printk("%p: period=%llu duty_cycle=%llu polarity=%d enabled=%d err=%d",
- __entry->pwm, __entry->period, __entry->duty_cycle,
+ TP_printk("pwmchip%u.%u: period=%llu duty_cycle=%llu polarity=%d enabled=%d err=%d",
+ __entry->chipid, __entry->hwpwm, __entry->period, __entry->duty_cycle,
__entry->polarity, __entry->enabled, __entry->err)
);
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 31b3e0d3e65f..e81431deaa50 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -466,40 +466,40 @@ TRACE_EVENT(rcu_stall_warning,
/*
* Tracepoint for dyntick-idle entry/exit events. These take 2 strings
* as argument:
- * polarity: "Start", "End", "StillNonIdle" for entering, exiting or still not
- * being in dyntick-idle mode.
+ * polarity: "Start", "End", "StillWatching" for entering, exiting or still not
+ * being in EQS mode.
* context: "USER" or "IDLE" or "IRQ".
- * NMIs nested in IRQs are inferred with dynticks_nesting > 1 in IRQ context.
+ * NMIs nested in IRQs are inferred with nesting > 1 in IRQ context.
*
* These events also take a pair of numbers, which indicate the nesting
* depth before and after the event of interest, and a third number that is
- * the ->dynticks counter. Note that task-related and interrupt-related
+ * the RCU_WATCHING counter. Note that task-related and interrupt-related
* events use two separate counters, and that the "++=" and "--=" events
* for irq/NMI will change the counter by two, otherwise by one.
*/
-TRACE_EVENT_RCU(rcu_dyntick,
+TRACE_EVENT_RCU(rcu_watching,
- TP_PROTO(const char *polarity, long oldnesting, long newnesting, int dynticks),
+ TP_PROTO(const char *polarity, long oldnesting, long newnesting, int counter),
- TP_ARGS(polarity, oldnesting, newnesting, dynticks),
+ TP_ARGS(polarity, oldnesting, newnesting, counter),
TP_STRUCT__entry(
__field(const char *, polarity)
__field(long, oldnesting)
__field(long, newnesting)
- __field(int, dynticks)
+ __field(int, counter)
),
TP_fast_assign(
__entry->polarity = polarity;
__entry->oldnesting = oldnesting;
__entry->newnesting = newnesting;
- __entry->dynticks = dynticks;
+ __entry->counter = counter;
),
TP_printk("%s %lx %lx %#3x", __entry->polarity,
__entry->oldnesting, __entry->newnesting,
- __entry->dynticks & 0xfff)
+ __entry->counter & 0xfff)
);
/*
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index a96a985c49b3..e6a72646c507 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -2172,6 +2172,29 @@ TRACE_EVENT(svcrdma_qp_error,
)
);
+TRACE_EVENT(svcrdma_device_removal,
+ TP_PROTO(
+ const struct rdma_cm_id *id
+ ),
+
+ TP_ARGS(id),
+
+ TP_STRUCT__entry(
+ __string(name, id->device->name)
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ memcpy(__entry->addr, &id->route.addr.dst_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+
+ TP_printk("device %s to be removed, disconnecting %pISpc\n",
+ __get_str(name), __entry->addr
+ )
+);
+
DECLARE_EVENT_CLASS(svcrdma_sendqueue_class,
TP_PROTO(
const struct svcxprt_rdma *rdma,
diff --git a/include/trace/events/sched_ext.h b/include/trace/events/sched_ext.h
new file mode 100644
index 000000000000..fe19da7315a9
--- /dev/null
+++ b/include/trace/events/sched_ext.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sched_ext
+
+#if !defined(_TRACE_SCHED_EXT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SCHED_EXT_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(sched_ext_dump,
+
+ TP_PROTO(const char *line),
+
+ TP_ARGS(line),
+
+ TP_STRUCT__entry(
+ __string(line, line)
+ ),
+
+ TP_fast_assign(
+ __assign_str(line);
+ ),
+
+ TP_printk("%s",
+ __get_str(line)
+ )
+);
+
+#endif /* _TRACE_SCHED_EXT_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
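
Editor's note: sched_ext_dump carries a single pre-formatted line, so the emitting side only has to hand it a string. A sketch of a hypothetical helper that formats and emits one line (not part of this patch):

/* Illustrative only: format one line and feed it to the new event. */
#include <linux/kernel.h>
#include <linux/stdarg.h>
#include <trace/events/sched_ext.h>

static void example_dump_line(const char *fmt, ...)
{
	char buf[256];
	va_list ap;

	va_start(ap, fmt);
	vscnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);

	if (trace_sched_ext_dump_enabled())
		trace_sched_ext_dump(buf);
}
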
diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h
deleted file mode 100644
index c4e209fbdfbb..000000000000
--- a/include/trace/events/ufs.h
+++ /dev/null
@@ -1,399 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
- */
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM ufs
-
-#if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_UFS_H
-
-#include <linux/tracepoint.h>
-
-#define str_opcode(opcode) \
- __print_symbolic(opcode, \
- { WRITE_16, "WRITE_16" }, \
- { WRITE_10, "WRITE_10" }, \
- { READ_16, "READ_16" }, \
- { READ_10, "READ_10" }, \
- { SYNCHRONIZE_CACHE, "SYNC" }, \
- { UNMAP, "UNMAP" })
-
-#define UFS_LINK_STATES \
- EM(UIC_LINK_OFF_STATE, "UIC_LINK_OFF_STATE") \
- EM(UIC_LINK_ACTIVE_STATE, "UIC_LINK_ACTIVE_STATE") \
- EMe(UIC_LINK_HIBERN8_STATE, "UIC_LINK_HIBERN8_STATE")
-
-#define UFS_PWR_MODES \
- EM(UFS_ACTIVE_PWR_MODE, "UFS_ACTIVE_PWR_MODE") \
- EM(UFS_SLEEP_PWR_MODE, "UFS_SLEEP_PWR_MODE") \
- EM(UFS_POWERDOWN_PWR_MODE, "UFS_POWERDOWN_PWR_MODE") \
- EMe(UFS_DEEPSLEEP_PWR_MODE, "UFS_DEEPSLEEP_PWR_MODE")
-
-#define UFSCHD_CLK_GATING_STATES \
- EM(CLKS_OFF, "CLKS_OFF") \
- EM(CLKS_ON, "CLKS_ON") \
- EM(REQ_CLKS_OFF, "REQ_CLKS_OFF") \
- EMe(REQ_CLKS_ON, "REQ_CLKS_ON")
-
-#define UFS_CMD_TRACE_STRINGS \
- EM(UFS_CMD_SEND, "send_req") \
- EM(UFS_CMD_COMP, "complete_rsp") \
- EM(UFS_DEV_COMP, "dev_complete") \
- EM(UFS_QUERY_SEND, "query_send") \
- EM(UFS_QUERY_COMP, "query_complete") \
- EM(UFS_QUERY_ERR, "query_complete_err") \
- EM(UFS_TM_SEND, "tm_send") \
- EM(UFS_TM_COMP, "tm_complete") \
- EMe(UFS_TM_ERR, "tm_complete_err")
-
-#define UFS_CMD_TRACE_TSF_TYPES \
- EM(UFS_TSF_CDB, "CDB") \
- EM(UFS_TSF_OSF, "OSF") \
- EM(UFS_TSF_TM_INPUT, "TM_INPUT") \
- EMe(UFS_TSF_TM_OUTPUT, "TM_OUTPUT")
-
-/* Enums require being exported to userspace, for user tool parsing */
-#undef EM
-#undef EMe
-#define EM(a, b) TRACE_DEFINE_ENUM(a);
-#define EMe(a, b) TRACE_DEFINE_ENUM(a);
-
-UFS_LINK_STATES;
-UFS_PWR_MODES;
-UFSCHD_CLK_GATING_STATES;
-UFS_CMD_TRACE_STRINGS
-UFS_CMD_TRACE_TSF_TYPES
-
-/*
- * Now redefine the EM() and EMe() macros to map the enums to the strings
- * that will be printed in the output.
- */
-#undef EM
-#undef EMe
-#define EM(a, b) {a, b},
-#define EMe(a, b) {a, b}
-
-#define show_ufs_cmd_trace_str(str_t) \
- __print_symbolic(str_t, UFS_CMD_TRACE_STRINGS)
-#define show_ufs_cmd_trace_tsf(tsf) \
- __print_symbolic(tsf, UFS_CMD_TRACE_TSF_TYPES)
-
-TRACE_EVENT(ufshcd_clk_gating,
-
- TP_PROTO(const char *dev_name, int state),
-
- TP_ARGS(dev_name, state),
-
- TP_STRUCT__entry(
- __string(dev_name, dev_name)
- __field(int, state)
- ),
-
- TP_fast_assign(
- __assign_str(dev_name);
- __entry->state = state;
- ),
-
- TP_printk("%s: gating state changed to %s",
- __get_str(dev_name),
- __print_symbolic(__entry->state, UFSCHD_CLK_GATING_STATES))
-);
-
-TRACE_EVENT(ufshcd_clk_scaling,
-
- TP_PROTO(const char *dev_name, const char *state, const char *clk,
- u32 prev_state, u32 curr_state),
-
- TP_ARGS(dev_name, state, clk, prev_state, curr_state),
-
- TP_STRUCT__entry(
- __string(dev_name, dev_name)
- __string(state, state)
- __string(clk, clk)
- __field(u32, prev_state)
- __field(u32, curr_state)
- ),
-
- TP_fast_assign(
- __assign_str(dev_name);
- __assign_str(state);
- __assign_str(clk);
- __entry->prev_state = prev_state;
- __entry->curr_state = curr_state;
- ),
-
- TP_printk("%s: %s %s from %u to %u Hz",
- __get_str(dev_name), __get_str(state), __get_str(clk),
- __entry->prev_state, __entry->curr_state)
-);
-
-TRACE_EVENT(ufshcd_auto_bkops_state,
-
- TP_PROTO(const char *dev_name, const char *state),
-
- TP_ARGS(dev_name, state),
-
- TP_STRUCT__entry(
- __string(dev_name, dev_name)
- __string(state, state)
- ),
-
- TP_fast_assign(
- __assign_str(dev_name);
- __assign_str(state);
- ),
-
- TP_printk("%s: auto bkops - %s",
- __get_str(dev_name), __get_str(state))
-);
-
-DECLARE_EVENT_CLASS(ufshcd_profiling_template,
- TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
- int err),
-
- TP_ARGS(dev_name, profile_info, time_us, err),
-
- TP_STRUCT__entry(
- __string(dev_name, dev_name)
- __string(profile_info, profile_info)
- __field(s64, time_us)
- __field(int, err)
- ),
-
- TP_fast_assign(
- __assign_str(dev_name);
- __assign_str(profile_info);
- __entry->time_us = time_us;
- __entry->err = err;
- ),
-
- TP_printk("%s: %s: took %lld usecs, err %d",
- __get_str(dev_name), __get_str(profile_info),
- __entry->time_us, __entry->err)
-);
-
-DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_hibern8,
- TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
- int err),
- TP_ARGS(dev_name, profile_info, time_us, err));
-
-DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_gating,
- TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
- int err),
- TP_ARGS(dev_name, profile_info, time_us, err));
-
-DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_scaling,
- TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
- int err),
- TP_ARGS(dev_name, profile_info, time_us, err));
-
-DECLARE_EVENT_CLASS(ufshcd_template,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
-
- TP_ARGS(dev_name, err, usecs, dev_state, link_state),
-
- TP_STRUCT__entry(
- __field(s64, usecs)
- __field(int, err)
- __string(dev_name, dev_name)
- __field(int, dev_state)
- __field(int, link_state)
- ),
-
- TP_fast_assign(
- __entry->usecs = usecs;
- __entry->err = err;
- __assign_str(dev_name);
- __entry->dev_state = dev_state;
- __entry->link_state = link_state;
- ),
-
- TP_printk(
- "%s: took %lld usecs, dev_state: %s, link_state: %s, err %d",
- __get_str(dev_name),
- __entry->usecs,
- __print_symbolic(__entry->dev_state, UFS_PWR_MODES),
- __print_symbolic(__entry->link_state, UFS_LINK_STATES),
- __entry->err
- )
-);
-
-DEFINE_EVENT(ufshcd_template, ufshcd_system_suspend,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
-
-DEFINE_EVENT(ufshcd_template, ufshcd_system_resume,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
-
-DEFINE_EVENT(ufshcd_template, ufshcd_runtime_suspend,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
-
-DEFINE_EVENT(ufshcd_template, ufshcd_runtime_resume,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
-
-DEFINE_EVENT(ufshcd_template, ufshcd_init,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
-
-DEFINE_EVENT(ufshcd_template, ufshcd_wl_suspend,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
-
-DEFINE_EVENT(ufshcd_template, ufshcd_wl_resume,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
-
-DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_suspend,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
-
-DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_resume,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
-
-TRACE_EVENT(ufshcd_command,
- TP_PROTO(struct scsi_device *sdev, enum ufs_trace_str_t str_t,
- unsigned int tag, u32 doorbell, u32 hwq_id, int transfer_len,
- u32 intr, u64 lba, u8 opcode, u8 group_id),
-
- TP_ARGS(sdev, str_t, tag, doorbell, hwq_id, transfer_len, intr, lba,
- opcode, group_id),
-
- TP_STRUCT__entry(
- __field(struct scsi_device *, sdev)
- __field(enum ufs_trace_str_t, str_t)
- __field(unsigned int, tag)
- __field(u32, doorbell)
- __field(u32, hwq_id)
- __field(u32, intr)
- __field(u64, lba)
- __field(int, transfer_len)
- __field(u8, opcode)
- __field(u8, group_id)
- ),
-
- TP_fast_assign(
- __entry->sdev = sdev;
- __entry->str_t = str_t;
- __entry->tag = tag;
- __entry->doorbell = doorbell;
- __entry->hwq_id = hwq_id;
- __entry->intr = intr;
- __entry->lba = lba;
- __entry->transfer_len = transfer_len;
- __entry->opcode = opcode;
- __entry->group_id = group_id;
- ),
-
- TP_printk(
- "%s: %s: tag: %u, DB: 0x%x, size: %d, IS: %u, LBA: %llu, opcode: 0x%x (%s), group_id: 0x%x, hwq_id: %d",
- show_ufs_cmd_trace_str(__entry->str_t),
- dev_name(&__entry->sdev->sdev_dev), __entry->tag,
- __entry->doorbell, __entry->transfer_len, __entry->intr,
- __entry->lba, (u32)__entry->opcode, str_opcode(__entry->opcode),
- (u32)__entry->group_id, __entry->hwq_id
- )
-);
-
-TRACE_EVENT(ufshcd_uic_command,
- TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, u32 cmd,
- u32 arg1, u32 arg2, u32 arg3),
-
- TP_ARGS(dev_name, str_t, cmd, arg1, arg2, arg3),
-
- TP_STRUCT__entry(
- __string(dev_name, dev_name)
- __field(enum ufs_trace_str_t, str_t)
- __field(u32, cmd)
- __field(u32, arg1)
- __field(u32, arg2)
- __field(u32, arg3)
- ),
-
- TP_fast_assign(
- __assign_str(dev_name);
- __entry->str_t = str_t;
- __entry->cmd = cmd;
- __entry->arg1 = arg1;
- __entry->arg2 = arg2;
- __entry->arg3 = arg3;
- ),
-
- TP_printk(
- "%s: %s: cmd: 0x%x, arg1: 0x%x, arg2: 0x%x, arg3: 0x%x",
- show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
- __entry->cmd, __entry->arg1, __entry->arg2, __entry->arg3
- )
-);
-
-TRACE_EVENT(ufshcd_upiu,
- TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, void *hdr,
- void *tsf, enum ufs_trace_tsf_t tsf_t),
-
- TP_ARGS(dev_name, str_t, hdr, tsf, tsf_t),
-
- TP_STRUCT__entry(
- __string(dev_name, dev_name)
- __field(enum ufs_trace_str_t, str_t)
- __array(unsigned char, hdr, 12)
- __array(unsigned char, tsf, 16)
- __field(enum ufs_trace_tsf_t, tsf_t)
- ),
-
- TP_fast_assign(
- __assign_str(dev_name);
- __entry->str_t = str_t;
- memcpy(__entry->hdr, hdr, sizeof(__entry->hdr));
- memcpy(__entry->tsf, tsf, sizeof(__entry->tsf));
- __entry->tsf_t = tsf_t;
- ),
-
- TP_printk(
- "%s: %s: HDR:%s, %s:%s",
- show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
- __print_hex(__entry->hdr, sizeof(__entry->hdr)),
- show_ufs_cmd_trace_tsf(__entry->tsf_t),
- __print_hex(__entry->tsf, sizeof(__entry->tsf))
- )
-);
-
-TRACE_EVENT(ufshcd_exception_event,
-
- TP_PROTO(const char *dev_name, u16 status),
-
- TP_ARGS(dev_name, status),
-
- TP_STRUCT__entry(
- __string(dev_name, dev_name)
- __field(u16, status)
- ),
-
- TP_fast_assign(
- __assign_str(dev_name);
- __entry->status = status;
- ),
-
- TP_printk("%s: status 0x%x",
- __get_str(dev_name), __entry->status
- )
-);
-
-#endif /* if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 54e353c9f919..a261e86e61fa 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -20,7 +20,15 @@
{I_CLEAR, "I_CLEAR"}, \
{I_SYNC, "I_SYNC"}, \
{I_DIRTY_TIME, "I_DIRTY_TIME"}, \
- {I_REFERENCED, "I_REFERENCED"} \
+ {I_REFERENCED, "I_REFERENCED"}, \
+ {I_LINKABLE, "I_LINKABLE"}, \
+ {I_WB_SWITCH, "I_WB_SWITCH"}, \
+ {I_OVL_INUSE, "I_OVL_INUSE"}, \
+ {I_CREATING, "I_CREATING"}, \
+ {I_DONTCACHE, "I_DONTCACHE"}, \
+ {I_SYNC_QUEUED, "I_SYNC_QUEUED"}, \
+ {I_PINNING_NETFS_WB, "I_PINNING_NETFS_WB"}, \
+ {I_LRU_ISOLATING, "I_LRU_ISOLATING"} \
)
/* enums need to be exported to user space */
diff --git a/include/trace/misc/nfs.h b/include/trace/misc/nfs.h
index 7b221d51133a..c82233e950ac 100644
--- a/include/trace/misc/nfs.h
+++ b/include/trace/misc/nfs.h
@@ -51,6 +51,7 @@ TRACE_DEFINE_ENUM(NFSERR_JUKEBOX);
{ NFSERR_IO, "IO" }, \
{ NFSERR_NXIO, "NXIO" }, \
{ ECHILD, "CHILD" }, \
+ { ETIMEDOUT, "TIMEDOUT" }, \
{ NFSERR_EAGAIN, "AGAIN" }, \
{ NFSERR_ACCES, "ACCES" }, \
{ NFSERR_EXIST, "EXIST" }, \
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 2d84a8052b15..78abd819fd62 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -703,6 +703,31 @@ extern "C" {
#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC fourcc_mod_code(INTEL, 15)
/*
+ * Intel Color Control Surfaces (CCS) for graphics ver. 20 unified compression
+ * on integrated graphics
+ *
+ * The main surface is Tile 4 and at plane index 0. For semi-planar formats
+ * like NV12, the Y and UV planes are Tile 4 and are located at plane indices
+ * 0 and 1, respectively. The CCS for all planes are stored outside of the
+ * GEM object in a reserved memory area dedicated for the storage of the
+ * CCS data for all compressible GEM objects.
+ */
+#define I915_FORMAT_MOD_4_TILED_LNL_CCS fourcc_mod_code(INTEL, 16)
+
+/*
+ * Intel Color Control Surfaces (CCS) for graphics ver. 20 unified compression
+ * on discrete graphics
+ *
+ * The main surface is Tile 4 and at plane index 0. For semi-planar formats
+ * like NV12, the Y and UV planes are Tile 4 and are located at plane indices
+ * 0 and 1, respectively. The CCS for all planes are stored outside of the
+ * GEM object in a reserved memory area dedicated for the storage of the
+ * CCS data for all compressible GEM objects. The GEM object must be stored in
+ * contiguous memory with a size aligned to 64KB
+ */
+#define I915_FORMAT_MOD_4_TILED_BMG_CCS fourcc_mod_code(INTEL, 17)
+
+/*
* Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
*
* Macroblocks are laid in a Z-shape, and each pixel data is following the
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index d390011b89b4..c082810c08a8 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -859,6 +859,8 @@ struct drm_color_lut {
/**
* struct drm_plane_size_hint - Plane size hints
+ * @width: The width of the plane in pixel
+ * @height: The height of the plane in pixel
*
* The plane SIZE_HINTS property blob contains an
* array of struct drm_plane_size_hint.
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 3fca72f73861..2377147b6af0 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -88,6 +88,8 @@ struct drm_msm_timespec {
#define MSM_PARAM_VA_SIZE 0x0f /* RO: size of valid GPU iova range (bytes) */
#define MSM_PARAM_HIGHEST_BANK_BIT 0x10 /* RO */
#define MSM_PARAM_RAYTRACING 0x11 /* RO */
+#define MSM_PARAM_UBWC_SWIZZLE 0x12 /* RO */
+#define MSM_PARAM_MACROTILE_MODE 0x13 /* RO */
/* For backwards compat. The original support for preemption was based on
* a single ring per priority level so # of priority levels equals the #
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index db232a25189e..b6fbe4988f2e 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -517,7 +517,14 @@ struct drm_xe_query_gt_list {
* available per Dual Sub Slices (DSS). For example a query response
* containing the following in mask:
* ``EU_PER_DSS ff ff 00 00 00 00 00 00``
- * means each DSS has 16 EU.
+ * means each DSS has 16 SIMD8 EUs. This type may be omitted if device
+ * doesn't have SIMD8 EUs.
+ * - %DRM_XE_TOPO_SIMD16_EU_PER_DSS - To query the mask of SIMD16 Execution
+ * Units (EU) available per Dual Sub Slices (DSS). For example a query
+ * response containing the following in mask:
+ * ``SIMD16_EU_PER_DSS ff ff 00 00 00 00 00 00``
+ * means each DSS has 16 SIMD16 EUs. This type may be omitted if device
+ * doesn't have SIMD16 EUs.
*/
struct drm_xe_query_topology_mask {
/** @gt_id: GT ID the mask is associated with */
@@ -527,6 +534,7 @@ struct drm_xe_query_topology_mask {
#define DRM_XE_TOPO_DSS_COMPUTE 2
#define DRM_XE_TOPO_L3_BANK 3
#define DRM_XE_TOPO_EU_PER_DSS 4
+#define DRM_XE_TOPO_SIMD16_EU_PER_DSS 5
/** @type: type of mask */
__u16 type;
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index d44a8118b2ed..1fd92021a573 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -236,6 +236,12 @@ struct binder_frozen_status_info {
__u32 async_recv;
};
+struct binder_frozen_state_info {
+ binder_uintptr_t cookie;
+ __u32 is_frozen;
+ __u32 reserved;
+};
+
/* struct binder_extened_error - extended error information
* @id: identifier for the failed operation
* @command: command as defined by binder_driver_return_protocol
@@ -467,6 +473,17 @@ enum binder_driver_return_protocol {
/*
* The target of the last async transaction is frozen. No parameters.
*/
+
+ BR_FROZEN_BINDER = _IOR('r', 21, struct binder_frozen_state_info),
+ /*
+ * The cookie and a boolean (is_frozen) that indicates whether the process
+ * transitioned into a frozen or an unfrozen state.
+ */
+
+ BR_CLEAR_FREEZE_NOTIFICATION_DONE = _IOR('r', 22, binder_uintptr_t),
+ /*
+ * void *: cookie
+ */
};
enum binder_driver_command_protocol {
@@ -550,6 +567,25 @@ enum binder_driver_command_protocol {
/*
* binder_transaction_data_sg: the sent command.
*/
+
+ BC_REQUEST_FREEZE_NOTIFICATION =
+ _IOW('c', 19, struct binder_handle_cookie),
+ /*
+ * int: handle
+ * void *: cookie
+ */
+
+ BC_CLEAR_FREEZE_NOTIFICATION = _IOW('c', 20,
+ struct binder_handle_cookie),
+ /*
+ * int: handle
+ * void *: cookie
+ */
+
+ BC_FREEZE_NOTIFICATION_DONE = _IOW('c', 21, binder_uintptr_t),
+ /*
+ * void *: cookie
+ */
};
#endif /* _UAPI_LINUX_BINDER_H */
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index d676ed2b246e..75e21a135483 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -143,6 +143,9 @@
#define AUDIT_MAC_UNLBL_STCDEL 1417 /* NetLabel: del a static label */
#define AUDIT_MAC_CALIPSO_ADD 1418 /* NetLabel: add CALIPSO DOI entry */
#define AUDIT_MAC_CALIPSO_DEL 1419 /* NetLabel: del CALIPSO DOI entry */
+#define AUDIT_IPE_ACCESS 1420 /* IPE denial or grant */
+#define AUDIT_IPE_CONFIG_CHANGE 1421 /* IPE config change */
+#define AUDIT_IPE_POLICY_LOAD 1422 /* IPE policy load */
#define AUDIT_FIRST_KERN_ANOM_MSG 1700
#define AUDIT_LAST_KERN_ANOM_MSG 1799
diff --git a/include/uapi/linux/auto_fs.h b/include/uapi/linux/auto_fs.h
index 1f7925afad2d..8081df849743 100644
--- a/include/uapi/linux/auto_fs.h
+++ b/include/uapi/linux/auto_fs.h
@@ -23,7 +23,7 @@
#define AUTOFS_MIN_PROTO_VERSION 3
#define AUTOFS_MAX_PROTO_VERSION 5
-#define AUTOFS_PROTO_SUBVERSION 5
+#define AUTOFS_PROTO_SUBVERSION 6
/*
* The wait_queue_token (autofs_wqt_t) is part of a structure which is passed
diff --git a/include/uapi/linux/bits.h b/include/uapi/linux/bits.h
index 3c2a101986a3..5ee30f882736 100644
--- a/include/uapi/linux/bits.h
+++ b/include/uapi/linux/bits.h
@@ -12,4 +12,7 @@
(((~_ULL(0)) - (_ULL(1) << (l)) + 1) & \
(~_ULL(0) >> (__BITS_PER_LONG_LONG - 1 - (h))))
+#define __GENMASK_U128(h, l) \
+ ((_BIT128((h)) << 1) - (_BIT128(l)))
+
#endif /* _UAPI_LINUX_BITS_H */
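
Editor's note: __GENMASK_U128(h, l) sets bits l through h of a 128-bit value via ((_BIT128(h) << 1) - _BIT128(l)). Worked example: __GENMASK_U128(3, 1) = ((1 << 3) << 1) - (1 << 1) = 16 - 2 = 14 = 0b1110, i.e. bits 1-3. Together with the _BIT128() definition added to const.h later in this diff, that can be checked at compile time within the kernel tree (the check itself is illustrative, not part of the patch):

/* Illustrative compile-time checks of the new 128-bit mask macro. */
#include <linux/bits.h>
#include <linux/build_bug.h>

static_assert(__GENMASK_U128(3, 1) == 14);
static_assert(__GENMASK_U128(127, 0) == ~(unsigned __int128)0);
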
diff --git a/include/uapi/linux/blkdev.h b/include/uapi/linux/blkdev.h
new file mode 100644
index 000000000000..66373cd1a83a
--- /dev/null
+++ b/include/uapi/linux/blkdev.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_BLKDEV_H
+#define _UAPI_LINUX_BLKDEV_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/*
+ * io_uring block file commands, see IORING_OP_URING_CMD.
+ * It's a different number space from ioctl(), reuse the block's code 0x12.
+ */
+#define BLOCK_URING_CMD_DISCARD _IO(0x12, 0)
+
+#endif
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index e05b39e39c3f..c6cd7c7aeeee 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -5519,11 +5519,12 @@ union bpf_attr {
* **-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if
* invalid arguments are passed.
*
- * void *bpf_kptr_xchg(void *map_value, void *ptr)
+ * void *bpf_kptr_xchg(void *dst, void *ptr)
* Description
- * Exchange kptr at pointer *map_value* with *ptr*, and return the
- * old value. *ptr* can be NULL, otherwise it must be a referenced
- * pointer which will be released when this helper is called.
+ * Exchange kptr at pointer *dst* with *ptr*, and return the old value.
+ * *dst* can be map value or local kptr. *ptr* can be NULL, otherwise
+ * it must be a referenced pointer which will be released when this helper
+ * is called.
* Return
* The old value of kptr (which can be NULL). The returned pointer
* if not NULL, is a reference which must be released using its
@@ -7513,4 +7514,13 @@ struct bpf_iter_num {
__u64 __opaque[1];
} __attribute__((aligned(8)));
+/*
+ * Flags to control BPF kfunc behaviour.
+ * - BPF_F_PAD_ZEROS: Pad destination buffer with zeros. (See the respective
+ * helper documentation for details.)
+ */
+enum bpf_kfunc_flags {
+ BPF_F_PAD_ZEROS = (1ULL << 0),
+};
+
#endif /* _UAPI__LINUX_BPF_H__ */
diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h
index b8e071abaea5..b2af1dddd4d7 100644
--- a/include/uapi/linux/cec.h
+++ b/include/uapi/linux/cec.h
@@ -132,6 +132,8 @@ static inline void cec_msg_init(struct cec_msg *msg,
* Set the msg destination to the orig initiator and the msg initiator to the
* orig destination. Note that msg and orig may be the same pointer, in which
* case the change is done in place.
+ *
+ * It also zeroes the reply, timeout and flags fields.
*/
static inline void cec_msg_set_reply_to(struct cec_msg *msg,
struct cec_msg *orig)
@@ -139,7 +141,9 @@ static inline void cec_msg_set_reply_to(struct cec_msg *msg,
/* The destination becomes the initiator and vice versa */
msg->msg[0] = (cec_msg_destination(orig) << 4) |
cec_msg_initiator(orig);
- msg->reply = msg->timeout = 0;
+ msg->reply = 0;
+ msg->timeout = 0;
+ msg->flags = 0;
}
/**
@@ -165,6 +169,7 @@ static inline int cec_msg_recv_is_rx_result(const struct cec_msg *msg)
/* cec_msg flags field */
#define CEC_MSG_FL_REPLY_TO_FOLLOWERS (1 << 0)
#define CEC_MSG_FL_RAW (1 << 1)
+#define CEC_MSG_FL_REPLY_VENDOR_ID (1 << 2)
/* cec_msg tx/rx_status field */
#define CEC_TX_STATUS_OK (1 << 0)
@@ -339,6 +344,8 @@ static inline int cec_is_unconfigured(__u16 log_addr_mask)
#define CEC_CAP_MONITOR_PIN (1 << 7)
/* CEC_ADAP_G_CONNECTOR_INFO is available */
#define CEC_CAP_CONNECTOR_INFO (1 << 8)
+/* CEC_MSG_FL_REPLY_VENDOR_ID is available */
+#define CEC_CAP_REPLY_VENDOR_ID (1 << 9)
/**
* struct cec_caps - CEC capabilities structure.
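
Editor's note: CEC_MSG_FL_REPLY_VENDOR_ID is gated by the new CEC_CAP_REPLY_VENDOR_ID capability bit, so userspace should only set the flag when the adapter advertises it. A hedged userspace sketch (illustrative; error handling trimmed):

/* Illustrative only: set the new flag only when the adapter supports it. */
#include <sys/ioctl.h>
#include <linux/cec.h>

int example_transmit(int fd, struct cec_msg *msg)
{
	struct cec_caps caps;

	if (ioctl(fd, CEC_ADAP_G_CAPS, &caps) == 0 &&
	    (caps.capabilities & CEC_CAP_REPLY_VENDOR_ID))
		msg->flags |= CEC_MSG_FL_REPLY_VENDOR_ID;

	return ioctl(fd, CEC_TRANSMIT, msg);
}
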
diff --git a/include/uapi/linux/const.h b/include/uapi/linux/const.h
index a429381e7ca5..e16be0d37746 100644
--- a/include/uapi/linux/const.h
+++ b/include/uapi/linux/const.h
@@ -28,6 +28,23 @@
#define _BITUL(x) (_UL(1) << (x))
#define _BITULL(x) (_ULL(1) << (x))
+#if !defined(__ASSEMBLY__)
+/*
+ * Missing asm support
+ *
+ * __BIT128() would not work in the asm code, as it shifts an
+ * 'unsigned __init128' data type as direct representation of
+ * 128 bit constants is not supported in the gcc compiler, as
+ * they get silently truncated.
+ *
+ * TODO: Please revisit this implementation when gcc compiler
+ * starts representing 128 bit constants directly like long
+ * and unsigned long etc. Subsequently drop the comment for
+ * GENMASK_U128() which would then start supporting asm code.
+ */
+#define _BIT128(x) ((unsigned __int128)(1) << (x))
+#endif
+
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index b54b313bcf07..b9935988da5c 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -411,6 +411,7 @@ typedef struct elf64_shdr {
#define NT_X86_XSTATE 0x202 /* x86 extended state using xsave */
/* Old binutils treats 0x203 as a CET state */
#define NT_X86_SHSTK 0x204 /* x86 SHSTK state */
+#define NT_X86_XSAVE_LAYOUT 0x205 /* XSAVE layout description */
#define NT_S390_HIGH_GPRS 0x300 /* s390 upper register halves */
#define NT_S390_TIMER 0x301 /* s390 timer register */
#define NT_S390_TODCMP 0x302 /* s390 TOD clock comparator register */
@@ -441,6 +442,7 @@ typedef struct elf64_shdr {
#define NT_ARM_ZA 0x40c /* ARM SME ZA registers */
#define NT_ARM_ZT 0x40d /* ARM SME ZT registers */
#define NT_ARM_FPMR 0x40e /* ARM floating point mode register */
+#define NT_ARM_POE 0x40f /* ARM POE registers */
#define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */
#define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */
#define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */
diff --git a/include/uapi/linux/exfat.h b/include/uapi/linux/exfat.h
new file mode 100644
index 000000000000..46d95b16fc4b
--- /dev/null
+++ b/include/uapi/linux/exfat.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2024 Unisoc Technologies Co., Ltd.
+ */
+
+#ifndef _UAPI_LINUX_EXFAT_H
+#define _UAPI_LINUX_EXFAT_H
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/*
+ * exfat-specific ioctl commands
+ */
+
+#define EXFAT_IOC_SHUTDOWN _IOR('X', 125, __u32)
+
+/*
+ * Flags used by EXFAT_IOC_SHUTDOWN
+ */
+
+#define EXFAT_GOING_DOWN_DEFAULT 0x0 /* default with full sync */
+#define EXFAT_GOING_DOWN_FULLSYNC 0x1 /* going down with full sync*/
+#define EXFAT_GOING_DOWN_NOSYNC 0x2 /* going down */
+
+#endif /* _UAPI_LINUX_EXFAT_H */
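A hedged userspace sketch of invoking the new ioctl, following the XFS_IOC_GOINGDOWN-style interface; fd is assumed to refer to a file or directory on the exfat mount:

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/exfat.h>

    __u32 flags = EXFAT_GOING_DOWN_FULLSYNC;

    if (ioctl(fd, EXFAT_IOC_SHUTDOWN, &flags) < 0)
            perror("EXFAT_IOC_SHUTDOWN");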
diff --git a/include/uapi/linux/falloc.h b/include/uapi/linux/falloc.h
index 51398fa57f6c..5810371ed72b 100644
--- a/include/uapi/linux/falloc.h
+++ b/include/uapi/linux/falloc.h
@@ -2,6 +2,7 @@
#ifndef _UAPI_FALLOC_H_
#define _UAPI_FALLOC_H_
+#define FALLOC_FL_ALLOCATE_RANGE 0x00 /* allocate range */
#define FALLOC_FL_KEEP_SIZE 0x01 /* default is extend size */
#define FALLOC_FL_PUNCH_HOLE 0x02 /* de-allocates range */
#define FALLOC_FL_NO_HIDE_STALE 0x04 /* reserved codepoint */
diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h
index c0bcc185fa48..87e2dec79fea 100644
--- a/include/uapi/linux/fcntl.h
+++ b/include/uapi/linux/fcntl.h
@@ -16,6 +16,9 @@
#define F_DUPFD_QUERY (F_LINUX_SPECIFIC_BASE + 3)
+/* Was the file just created? */
+#define F_CREATED_QUERY (F_LINUX_SPECIFIC_BASE + 4)
+
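A sketch of the intended query; the return convention assumed here is a positive value when this open() created the file and 0 otherwise:

    #include <fcntl.h>

    int fd = open("data.bin", O_CREAT | O_RDWR, 0644);
    int created = fcntl(fd, F_CREATED_QUERY, 0);

    if (created > 0) {
            /* the open() above created the file */
    }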
/*
* Cancel a blocking posix lock; internal use only until we expose an
* asynchronous lock api to userspace:
@@ -87,37 +90,70 @@
#define DN_ATTRIB	0x00000020	/* File changed attributes */
#define DN_MULTISHOT 0x80000000 /* Don't remove notifier */
+#define AT_FDCWD -100 /* Special value for dirfd used to
+ indicate openat should use the
+ current working directory. */
+
+
+/* Generic flags for the *at(2) family of syscalls. */
+
+/* Reserved for per-syscall flags 0xff. */
+#define AT_SYMLINK_NOFOLLOW 0x100 /* Do not follow symbolic
+ links. */
+/* Reserved for per-syscall flags 0x200 */
+#define AT_SYMLINK_FOLLOW 0x400 /* Follow symbolic links. */
+#define AT_NO_AUTOMOUNT 0x800 /* Suppress terminal automount
+ traversal. */
+#define AT_EMPTY_PATH 0x1000 /* Allow empty relative
+ pathname to operate on dirfd
+ directly. */
+/*
+ * These flags are currently statx(2)-specific, but they could be made generic
+ * in the future and so they should not be used for other per-syscall flags.
+ */
+#define AT_STATX_SYNC_TYPE 0x6000 /* Type of synchronisation required from statx() */
+#define AT_STATX_SYNC_AS_STAT 0x0000 /* - Do whatever stat() does */
+#define AT_STATX_FORCE_SYNC 0x2000 /* - Force the attributes to be sync'd with the server */
+#define AT_STATX_DONT_SYNC 0x4000 /* - Don't sync attributes with the server */
+
+#define AT_RECURSIVE 0x8000 /* Apply to the entire subtree */
+
/*
- * The constants AT_REMOVEDIR and AT_EACCESS have the same value. AT_EACCESS is
- * meaningful only to faccessat, while AT_REMOVEDIR is meaningful only to
- * unlinkat. The two functions do completely different things and therefore,
- * the flags can be allowed to overlap. For example, passing AT_REMOVEDIR to
- * faccessat would be undefined behavior and thus treating it equivalent to
- * AT_EACCESS is valid undefined behavior.
+ * Per-syscall flags for the *at(2) family of syscalls.
+ *
+ * These are flags that are so syscall-specific that a user passing these flags
+ * to the wrong syscall is so "clearly wrong" that we can safely call such
+ * usage "undefined behaviour".
+ *
+ * For example, the constants AT_REMOVEDIR and AT_EACCESS have the same value.
+ * AT_EACCESS is meaningful only to faccessat, while AT_REMOVEDIR is meaningful
+ * only to unlinkat. The two functions do completely different things and
+ * therefore, the flags can be allowed to overlap. For example, passing
+ * AT_REMOVEDIR to faccessat would be undefined behavior and thus treating it
+ * equivalent to AT_EACCESS is valid undefined behavior.
+ *
+ * Note for implementers: When picking a new per-syscall AT_* flag, try to
+ * reuse already existing flags first. This leaves us with as many unused bits
+ * as possible, so we can use them for generic bits in the future if necessary.
*/
-#define AT_FDCWD -100 /* Special value used to indicate
- openat should use the current
- working directory. */
-#define AT_SYMLINK_NOFOLLOW 0x100 /* Do not follow symbolic links. */
+
+/* Flags for renameat2(2) (must match legacy RENAME_* flags). */
+#define AT_RENAME_NOREPLACE 0x0001
+#define AT_RENAME_EXCHANGE 0x0002
+#define AT_RENAME_WHITEOUT 0x0004
+
+/* Flag for faccessat(2). */
#define AT_EACCESS 0x200 /* Test access permitted for
effective IDs, not real IDs. */
+/* Flag for unlinkat(2). */
#define AT_REMOVEDIR 0x200 /* Remove directory instead of
unlinking file. */
-#define AT_SYMLINK_FOLLOW 0x400 /* Follow symbolic links. */
-#define AT_NO_AUTOMOUNT 0x800 /* Suppress terminal automount traversal */
-#define AT_EMPTY_PATH 0x1000 /* Allow empty relative pathname */
-
-#define AT_STATX_SYNC_TYPE 0x6000 /* Type of synchronisation required from statx() */
-#define AT_STATX_SYNC_AS_STAT 0x0000 /* - Do whatever stat() does */
-#define AT_STATX_FORCE_SYNC 0x2000 /* - Force the attributes to be sync'd with the server */
-#define AT_STATX_DONT_SYNC 0x4000 /* - Don't sync attributes with the server */
-
-#define AT_RECURSIVE 0x8000 /* Apply to the entire subtree */
+/* Flags for name_to_handle_at(2). */
+#define AT_HANDLE_FID 0x200 /* File handle is needed to compare
+ object identity and may not be
+ usable with open_by_handle_at(2). */
+#define AT_HANDLE_MNT_ID_UNIQUE 0x001 /* Return the u64 unique mount ID. */
-/* Flags for name_to_handle_at(2). We reuse AT_ flag space to save bits... */
-#define AT_HANDLE_FID AT_REMOVEDIR /* file handle is needed to
- compare object identity and may not
- be usable to open_by_handle_at(2) */
#if defined(__KERNEL__)
#define AT_GETATTR_NOSEC 0x80000000
#endif
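A sketch of the per-syscall flag usage for name_to_handle_at(2); AT_HANDLE_MNT_ID_UNIQUE, which makes the kernel return the 64-bit unique mount ID instead of the legacy int value, is noted above but not exercised here:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct file_handle *fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
    int mount_id;

    fh->handle_bytes = MAX_HANDLE_SZ;
    /* AT_HANDLE_FID: the handle identifies the object but may not be openable. */
    if (name_to_handle_at(AT_FDCWD, "some/path", fh, &mount_id, AT_HANDLE_FID) < 0)
            perror("name_to_handle_at");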
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index d08b99d60f6f..f1e99458e29e 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -217,6 +217,9 @@
* - add backing_id to fuse_open_out, add FOPEN_PASSTHROUGH open flag
* - add FUSE_NO_EXPORT_SUPPORT init flag
* - add FUSE_NOTIFY_RESEND, add FUSE_HAS_RESEND init flag
+ *
+ * 7.41
+ * - add FUSE_ALLOW_IDMAP
*/
#ifndef _LINUX_FUSE_H
@@ -252,7 +255,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 40
+#define FUSE_KERNEL_MINOR_VERSION 41
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -421,6 +424,7 @@ struct fuse_file_lock {
* FUSE_NO_EXPORT_SUPPORT: explicitly disable export support
* FUSE_HAS_RESEND: kernel supports resending pending requests, and the high bit
* of the request ID indicates resend requests
+ * FUSE_ALLOW_IDMAP: allow creation of idmapped mounts
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -466,6 +470,7 @@ struct fuse_file_lock {
/* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */
#define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP
+#define FUSE_ALLOW_IDMAP (1ULL << 40)
/**
* CUSE INIT request/reply flags
@@ -984,6 +989,21 @@ struct fuse_fallocate_in {
*/
#define FUSE_UNIQUE_RESEND (1ULL << 63)
+/**
+ * The kernel sets this value in the
+ * (struct fuse_in_header).{uid,gid} fields when:
+ * - the fuse daemon has enabled FUSE_ALLOW_IDMAP, and
+ * - idmapping information is not available, so the uid/gid
+ * cannot be mapped in accordance with an idmapping.
+ *
+ * Note: idmapping information is always available
+ * for inode creation operations such as:
+ * FUSE_MKNOD, FUSE_SYMLINK, FUSE_MKDIR, FUSE_TMPFILE,
+ * FUSE_CREATE and FUSE_RENAME2 (with RENAME_WHITEOUT).
+ */
+#define FUSE_INVALID_UIDGID ((uint32_t)(-1))
+
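A sketch of how a FUSE server that negotiated FUSE_ALLOW_IDMAP might guard against unmapped IDs; the helper name is illustrative:

    #include <stdbool.h>
    #include <linux/fuse.h>

    static bool req_has_mapped_ids(const struct fuse_in_header *in)
    {
            return in->uid != FUSE_INVALID_UIDGID &&
                   in->gid != FUSE_INVALID_UIDGID;
    }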
struct fuse_in_header {
uint32_t len;
uint32_t opcode;
diff --git a/include/uapi/linux/hidraw.h b/include/uapi/linux/hidraw.h
index 33ebad81720a..d5ee269864e0 100644
--- a/include/uapi/linux/hidraw.h
+++ b/include/uapi/linux/hidraw.h
@@ -46,6 +46,7 @@ struct hidraw_devinfo {
/* The first byte of SOUTPUT and GOUTPUT is the report number */
#define HIDIOCSOUTPUT(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x0B, len)
#define HIDIOCGOUTPUT(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x0C, len)
+#define HIDIOCREVOKE _IOW('H', 0x0D, int) /* Revoke device access */
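A sketch of a privileged broker revoking a client's hidraw descriptor; passing a NULL argument is an assumption of this sketch:

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/hidraw.h>

    if (ioctl(hidraw_fd, HIDIOCREVOKE, NULL) < 0)
            perror("HIDIOCREVOKE");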
#define HIDRAW_FIRST_MINOR 0
#define HIDRAW_MAX_DEVICES 64
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index adc2524fd8e3..1fe79e750470 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -440,11 +440,21 @@ struct io_uring_cqe {
* IORING_CQE_F_SOCK_NONEMPTY If set, more data to read after socket recv
* IORING_CQE_F_NOTIF Set for notification CQEs. Can be used to distinct
* them from sends.
+ * IORING_CQE_F_BUF_MORE If set, the buffer ID set in the completion will get
+ * more completions. In other words, the buffer is being
+ * partially consumed, and will be used by the kernel for
+ * more completions. This is only set for buffers used via
+ * the incremental buffer consumption, as provided by
+ * a ring buffer setup with IOU_PBUF_RING_INC. For any
+ * other provided buffer type, every buffer passed back
+ * with a completion is automatically returned to the
+ * application.
*/
#define IORING_CQE_F_BUFFER (1U << 0)
#define IORING_CQE_F_MORE (1U << 1)
#define IORING_CQE_F_SOCK_NONEMPTY (1U << 2)
#define IORING_CQE_F_NOTIF (1U << 3)
+#define IORING_CQE_F_BUF_MORE (1U << 4)
#define IORING_CQE_BUFFER_SHIFT 16
@@ -507,6 +517,7 @@ struct io_cqring_offsets {
#define IORING_ENTER_SQ_WAIT (1U << 2)
#define IORING_ENTER_EXT_ARG (1U << 3)
#define IORING_ENTER_REGISTERED_RING (1U << 4)
+#define IORING_ENTER_ABS_TIMER (1U << 5)
/*
* Passed in for io_uring_setup(2). Copied back with updated info on success
@@ -542,6 +553,7 @@ struct io_uring_params {
#define IORING_FEAT_LINKED_FILE (1U << 12)
#define IORING_FEAT_REG_REG_RING (1U << 13)
#define IORING_FEAT_RECVSEND_BUNDLE (1U << 14)
+#define IORING_FEAT_MIN_TIMEOUT (1U << 15)
/*
* io_uring_register(2) opcodes and arguments
@@ -595,6 +607,11 @@ enum io_uring_register_op {
IORING_REGISTER_NAPI = 27,
IORING_UNREGISTER_NAPI = 28,
+ IORING_REGISTER_CLOCK = 29,
+
+ /* clone registered buffers from source ring to current ring */
+ IORING_REGISTER_CLONE_BUFFERS = 30,
+
/* this goes last */
IORING_REGISTER_LAST,
@@ -675,6 +692,21 @@ struct io_uring_restriction {
__u32 resv2[3];
};
+struct io_uring_clock_register {
+ __u32 clockid;
+ __u32 __resv[3];
+};
+
+enum {
+ IORING_REGISTER_SRC_REGISTERED = 1,
+};
+
+struct io_uring_clone_buffers {
+ __u32 src_fd;
+ __u32 flags;
+ __u32 pad[6];
+};
+
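A sketch of the argument for the new IORING_REGISTER_CLONE_BUFFERS opcode of io_uring_register(2); the meaning assumed for IORING_REGISTER_SRC_REGISTERED is that src_fd names a registered ring rather than a regular ring fd:

    struct io_uring_clone_buffers arg = {
            .src_fd = src_ring_fd,
            .flags  = 0,    /* or IORING_REGISTER_SRC_REGISTERED, see above */
    };

    /* Passed as the argument of io_uring_register(2) with the
     * IORING_REGISTER_CLONE_BUFFERS opcode; the nr_args value is not shown here. */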
struct io_uring_buf {
__u64 addr;
__u32 len;
@@ -707,9 +739,17 @@ struct io_uring_buf_ring {
* mmap(2) with the offset set as:
* IORING_OFF_PBUF_RING | (bgid << IORING_OFF_PBUF_SHIFT)
* to get a virtual mapping for the ring.
+ * IOU_PBUF_RING_INC: If set, buffers consumed from this buffer ring can be
+ * consumed incrementally. Normally one (or more) buffers
+ * are fully consumed. With incremental consumption, it's
+ * feasible to register big ranges of buffers, and each
+ * use of it will consume only as much as it needs. This
+ * requires that both the kernel and application keep
+ * track of where the current read/recv index is at.
*/
enum io_uring_register_pbuf_ring_flags {
IOU_PBUF_RING_MMAP = 1,
+ IOU_PBUF_RING_INC = 2,
};
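A completion-side sketch of the behaviour described above; consume() and recycle_buffer() are hypothetical application helpers:

    if (cqe->flags & IORING_CQE_F_BUFFER) {
            unsigned int bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;

            consume(bid, cqe->res);         /* cqe->res bytes landed in this buffer */
            if (!(cqe->flags & IORING_CQE_F_BUF_MORE))
                    recycle_buffer(bid);    /* kernel is done with this buffer */
    }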
/* argument for IORING_(UN)REGISTER_PBUF_RING */
@@ -758,7 +798,7 @@ enum io_uring_register_restriction_op {
struct io_uring_getevents_arg {
__u64 sigmask;
__u32 sigmask_sz;
- __u32 pad;
+ __u32 min_wait_usec;
__u64 ts;
};
diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h
index 4dde745cfb7e..72010f71c5e4 100644
--- a/include/uapi/linux/iommufd.h
+++ b/include/uapi/linux/iommufd.h
@@ -4,8 +4,8 @@
#ifndef _UAPI_IOMMUFD_H
#define _UAPI_IOMMUFD_H
-#include <linux/types.h>
#include <linux/ioctl.h>
+#include <linux/types.h>
#define IOMMUFD_TYPE (';')
diff --git a/include/uapi/linux/kernel-page-flags.h b/include/uapi/linux/kernel-page-flags.h
index 6f2f2720f3ac..ff8032227876 100644
--- a/include/uapi/linux/kernel-page-flags.h
+++ b/include/uapi/linux/kernel-page-flags.h
@@ -7,7 +7,7 @@
*/
#define KPF_LOCKED 0
-#define KPF_ERROR 1
+#define KPF_ERROR 1 /* Now unused */
#define KPF_REFERENCED 2
#define KPF_UPTODATE 3
#define KPF_DIRTY 4
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 285a36601dc9..717307d6b5b7 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -42,9 +42,10 @@
* - 1.14 - Update kfd_event_data
* - 1.15 - Enable managing mappings in compute VMs with GEM_VA ioctl
* - 1.16 - Add contiguous VRAM allocation flag
+ * - 1.17 - Add SDMA queue creation with target SDMA engine ID
*/
#define KFD_IOCTL_MAJOR_VERSION 1
-#define KFD_IOCTL_MINOR_VERSION 16
+#define KFD_IOCTL_MINOR_VERSION 17
struct kfd_ioctl_get_version_args {
__u32 major_version; /* from KFD */
@@ -56,6 +57,7 @@ struct kfd_ioctl_get_version_args {
#define KFD_IOC_QUEUE_TYPE_SDMA 0x1
#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL 0x2
#define KFD_IOC_QUEUE_TYPE_SDMA_XGMI 0x3
+#define KFD_IOC_QUEUE_TYPE_SDMA_BY_ENG_ID 0x4
#define KFD_MAX_QUEUE_PERCENTAGE 100
#define KFD_MAX_QUEUE_PRIORITY 15
@@ -78,6 +80,8 @@ struct kfd_ioctl_create_queue_args {
__u64 ctx_save_restore_address; /* to KFD */
__u32 ctx_save_restore_size; /* to KFD */
__u32 ctl_stack_size; /* to KFD */
+ __u32 sdma_engine_id; /* to KFD */
+ __u32 pad;
};
struct kfd_ioctl_destroy_queue_args {
@@ -536,26 +540,29 @@ enum kfd_smi_event {
KFD_SMI_EVENT_ALL_PROCESS = 64
};
+/* The reason for the page migration event */
enum KFD_MIGRATE_TRIGGERS {
- KFD_MIGRATE_TRIGGER_PREFETCH,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
- KFD_MIGRATE_TRIGGER_TTM_EVICTION
+ KFD_MIGRATE_TRIGGER_PREFETCH, /* Prefetch to GPU VRAM or system memory */
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, /* GPU page fault recover */
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, /* CPU page fault recover */
+ KFD_MIGRATE_TRIGGER_TTM_EVICTION /* TTM eviction */
};
+/* The reason for the user queue eviction event */
enum KFD_QUEUE_EVICTION_TRIGGERS {
- KFD_QUEUE_EVICTION_TRIGGER_SVM,
- KFD_QUEUE_EVICTION_TRIGGER_USERPTR,
- KFD_QUEUE_EVICTION_TRIGGER_TTM,
- KFD_QUEUE_EVICTION_TRIGGER_SUSPEND,
- KFD_QUEUE_EVICTION_CRIU_CHECKPOINT,
- KFD_QUEUE_EVICTION_CRIU_RESTORE
+ KFD_QUEUE_EVICTION_TRIGGER_SVM, /* SVM buffer migration */
+ KFD_QUEUE_EVICTION_TRIGGER_USERPTR, /* userptr movement */
+ KFD_QUEUE_EVICTION_TRIGGER_TTM, /* TTM move buffer */
+ KFD_QUEUE_EVICTION_TRIGGER_SUSPEND, /* GPU suspend */
+ KFD_QUEUE_EVICTION_CRIU_CHECKPOINT, /* CRIU checkpoint */
+ KFD_QUEUE_EVICTION_CRIU_RESTORE /* CRIU restore */
};
+/* The reason for the unmap-buffer-from-GPU event */
enum KFD_SVM_UNMAP_TRIGGERS {
- KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY,
- KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE,
- KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU
+ KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY, /* MMU notifier CPU buffer movement */
+ KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE,/* MMU notifier page migration */
+ KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU /* Unmap to free the buffer */
};
#define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1))
@@ -566,6 +573,77 @@ struct kfd_ioctl_smi_events_args {
__u32 anon_fd; /* from KFD */
};
+/*
+ * SVM event tracing via SMI system management interface
+ *
+ * Open an event file descriptor
+ * Use the AMDKFD_IOC_SMI_EVENTS ioctl, passing in the gpuid; it returns an
+ * anonymous file descriptor for receiving SMI events.
+ * If called with sudo permission, the file descriptor can be used to receive
+ * SVM events from all processes; otherwise it only receives SVM events of the
+ * calling process.
+ *
+ * To enable SVM events
+ * Write a KFD_SMI_EVENT_MASK_FROM_INDEX(event) bitmap mask to the event file
+ * descriptor to start recording the event to the kfifo; combine bitmap masks
+ * for multiple events. A new event mask overwrites the previous event mask.
+ * The KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_ALL_PROCESS) bit requires
+ * sudo permission to receive SVM events from all processes.
+ *
+ * To receive events
+ * The application can poll the file descriptor to wait for events, then read
+ * events from the file into a buffer. Each event is a one-line string message,
+ * starting with the event id, followed by the event-specific information.
+ *
+ * To decode event information
+ * The following event format string macros can be used with sscanf to decode
+ * the event-specific information.
+ * event triggers: the reason the event was generated, defined as enums for the
+ * unmap, eviction and migrate events.
+ * node, from, to, prefetch_loc, preferred_loc: GPU ID, or 0 for system memory.
+ * addr: user mode address, in pages
+ * size: in pages
+ * pid: the process ID that generated the event
+ * ns: timestamp with nanosecond resolution; starts at system boot time but
+ * stops during suspend
+ * migrate_update: whether the GPU page fault was recovered by 'M' migration or
+ * 'U' page table update
+ * rw: 'W' for a write page fault, 'R' for a read page fault
+ * rescheduled: 'R' if the queue restore failed and was rescheduled to try again
+ */
+#define KFD_EVENT_FMT_UPDATE_GPU_RESET(reset_seq_num, reset_cause)\
+ "%x %s\n", (reset_seq_num), (reset_cause)
+
+#define KFD_EVENT_FMT_THERMAL_THROTTLING(bitmask, counter)\
+ "%llx:%llx\n", (bitmask), (counter)
+
+#define KFD_EVENT_FMT_VMFAULT(pid, task_name)\
+ "%x:%s\n", (pid), (task_name)
+
+#define KFD_EVENT_FMT_PAGEFAULT_START(ns, pid, addr, node, rw)\
+ "%lld -%d @%lx(%x) %c\n", (ns), (pid), (addr), (node), (rw)
+
+#define KFD_EVENT_FMT_PAGEFAULT_END(ns, pid, addr, node, migrate_update)\
+ "%lld -%d @%lx(%x) %c\n", (ns), (pid), (addr), (node), (migrate_update)
+
+#define KFD_EVENT_FMT_MIGRATE_START(ns, pid, start, size, from, to, prefetch_loc,\
+ preferred_loc, migrate_trigger)\
+ "%lld -%d @%lx(%lx) %x->%x %x:%x %d\n", (ns), (pid), (start), (size),\
+ (from), (to), (prefetch_loc), (preferred_loc), (migrate_trigger)
+
+#define KFD_EVENT_FMT_MIGRATE_END(ns, pid, start, size, from, to, migrate_trigger)\
+ "%lld -%d @%lx(%lx) %x->%x %d\n", (ns), (pid), (start), (size),\
+ (from), (to), (migrate_trigger)
+
+#define KFD_EVENT_FMT_QUEUE_EVICTION(ns, pid, node, evict_trigger)\
+ "%lld -%d %x %d\n", (ns), (pid), (node), (evict_trigger)
+
+#define KFD_EVENT_FMT_QUEUE_RESTORE(ns, pid, node, rescheduled)\
+ "%lld -%d %x %c\n", (ns), (pid), (node), (rescheduled)
+
+#define KFD_EVENT_FMT_UNMAP_FROM_GPU(ns, pid, addr, size, node, unmap_trigger)\
+ "%lld -%d @%lx(%lx) %x %d\n", (ns), (pid), (addr), (size),\
+ (node), (unmap_trigger)
+
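A sketch of decoding a page-fault-start event line read from the SMI fd; KFD_SMI_EVENT_PAGE_FAULT_START is assumed to be the matching id in enum kfd_smi_event, and the leading "<event id> " token is skipped before applying the format macro:

    #include <stdio.h>
    #include <string.h>
    #include <linux/kfd_ioctl.h>

    long long ns;
    int pid;
    unsigned long addr;
    unsigned int node;
    char rw;
    char *info = strchr(line, ' ');         /* skip the "<event id> " prefix */

    if (info)
            sscanf(info + 1,
                   KFD_EVENT_FMT_PAGEFAULT_START(&ns, &pid, &addr, &node, &rw));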
/**************************************************************************************************
* CRIU IOCTLs (Checkpoint Restore In Userspace)
*
diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h
index 2c8dbc74b955..33745642f787 100644
--- a/include/uapi/linux/landlock.h
+++ b/include/uapi/linux/landlock.h
@@ -44,6 +44,12 @@ struct landlock_ruleset_attr {
* flags`_).
*/
__u64 handled_access_net;
+ /**
+ * @scoped: Bitmask of scopes (cf. `Scope flags`_)
+ * restricting a Landlock domain from accessing outside
+ * resources (e.g. IPCs).
+ */
+ __u64 scoped;
};
/*
@@ -274,4 +280,28 @@ struct landlock_net_port_attr {
#define LANDLOCK_ACCESS_NET_BIND_TCP (1ULL << 0)
#define LANDLOCK_ACCESS_NET_CONNECT_TCP (1ULL << 1)
/* clang-format on */
+
+/**
+ * DOC: scope
+ *
+ * Scope flags
+ * ~~~~~~~~~~~
+ *
+ * These flags make it possible to isolate a sandboxed process from a set of
+ * IPC actions. Setting a flag for a ruleset isolates the Landlock domain,
+ * forbidding connections to resources outside the domain.
+ *
+ * Scopes:
+ *
+ * - %LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET: Restrict a sandboxed process from
+ * connecting to an abstract UNIX socket created by a process outside the
+ * related Landlock domain (e.g. a parent domain or a non-sandboxed process).
+ * - %LANDLOCK_SCOPE_SIGNAL: Restrict a sandboxed process from sending a signal
+ * to another process outside the domain.
+ */
+/* clang-format off */
+#define LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET (1ULL << 0)
+#define LANDLOCK_SCOPE_SIGNAL (1ULL << 1)
+/* clang-format on */
+
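A sketch of creating a ruleset that uses the new scoped field; enforcing it via prctl(PR_SET_NO_NEW_PRIVS) and landlock_restrict_self(2) is omitted:

    #include <linux/landlock.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    struct landlock_ruleset_attr attr = {
            .scoped = LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET |
                      LANDLOCK_SCOPE_SIGNAL,
    };
    int ruleset_fd = syscall(__NR_landlock_create_ruleset,
                             &attr, sizeof(attr), 0);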
#endif /* _UAPI_LINUX_LANDLOCK_H */
diff --git a/include/uapi/linux/lsm.h b/include/uapi/linux/lsm.h
index 33d8c9f4aa6b..938593dfd5da 100644
--- a/include/uapi/linux/lsm.h
+++ b/include/uapi/linux/lsm.h
@@ -64,6 +64,7 @@ struct lsm_ctx {
#define LSM_ID_LANDLOCK 110
#define LSM_ID_IMA 111
#define LSM_ID_EVM 112
+#define LSM_ID_IPE 113
/*
* LSM_ATTR_XXX definitions identify different LSM attributes
diff --git a/include/uapi/linux/nbd.h b/include/uapi/linux/nbd.h
index 80ce0ef43afd..f1d468acfb25 100644
--- a/include/uapi/linux/nbd.h
+++ b/include/uapi/linux/nbd.h
@@ -42,8 +42,9 @@ enum {
NBD_CMD_WRITE = 1,
NBD_CMD_DISC = 2,
NBD_CMD_FLUSH = 3,
- NBD_CMD_TRIM = 4
+ NBD_CMD_TRIM = 4,
/* userspace defines additional extension commands */
+ NBD_CMD_WRITE_ZEROES = 6,
};
/* values for flags field, these are server interaction specific. */
@@ -51,12 +52,15 @@ enum {
#define NBD_FLAG_READ_ONLY (1 << 1) /* device is read-only */
#define NBD_FLAG_SEND_FLUSH (1 << 2) /* can flush writeback cache */
#define NBD_FLAG_SEND_FUA (1 << 3) /* send FUA (forced unit access) */
-/* there is a gap here to match userspace */
+#define NBD_FLAG_ROTATIONAL (1 << 4) /* device is rotational */
#define NBD_FLAG_SEND_TRIM (1 << 5) /* send trim/discard */
+#define NBD_FLAG_SEND_WRITE_ZEROES (1 << 6) /* supports WRITE_ZEROES */
+/* there is a gap here to match userspace */
#define NBD_FLAG_CAN_MULTI_CONN (1 << 8) /* Server supports multiple connections per export. */
/* values for cmd flags in the upper 16 bits of request type */
#define NBD_CMD_FLAG_FUA (1 << 16) /* FUA (forced unit access) op */
+#define NBD_CMD_FLAG_NO_HOLE (1 << 17) /* Do not punch a hole for WRITE_ZEROES */
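A sketch of how the new command and flag combine in the on-wire request type word; req is a hypothetical struct nbd_request being prepared by a client:

    /* Zero a range but keep the blocks allocated (no hole punching). */
    __u32 type = NBD_CMD_WRITE_ZEROES | NBD_CMD_FLAG_NO_HOLE;

    req.type = htonl(type);     /* request fields are big-endian on the wire */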
/* These are client behavior specific flags. */
#define NBD_CFLAG_DESTROY_ON_DISCONNECT (1 << 0) /* delete the nbd device on
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index d6476ca5d7a6..9e9079321380 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -1694,7 +1694,7 @@ enum nft_flowtable_flags {
*
* @NFTA_FLOWTABLE_TABLE: name of the table containing the expression (NLA_STRING)
* @NFTA_FLOWTABLE_NAME: name of this flow table (NLA_STRING)
- * @NFTA_FLOWTABLE_HOOK: netfilter hook configuration(NLA_U32)
+ * @NFTA_FLOWTABLE_HOOK: netfilter hook configuration (NLA_NESTED)
* @NFTA_FLOWTABLE_USE: number of references to this flow table (NLA_U32)
* @NFTA_FLOWTABLE_HANDLE: object handle (NLA_U64)
* @NFTA_FLOWTABLE_FLAGS: flags (NLA_U32)
diff --git a/include/uapi/linux/nsfs.h b/include/uapi/linux/nsfs.h
index 5fad3d0fcd70..34127653fd00 100644
--- a/include/uapi/linux/nsfs.h
+++ b/include/uapi/linux/nsfs.h
@@ -27,4 +27,19 @@
/* Return thread-group leader id of pid in the target pid namespace. */
#define NS_GET_TGID_IN_PIDNS _IOR(NSIO, 0x9, int)
+struct mnt_ns_info {
+ __u32 size;
+ __u32 nr_mounts;
+ __u64 mnt_ns_id;
+};
+
+#define MNT_NS_INFO_SIZE_VER0 16 /* size of first published struct */
+
+/* Get information about namespace. */
+#define NS_MNT_GET_INFO _IOR(NSIO, 10, struct mnt_ns_info)
+/* Get next namespace. */
+#define NS_MNT_GET_NEXT _IOR(NSIO, 11, struct mnt_ns_info)
+/* Get previous namespace. */
+#define NS_MNT_GET_PREV _IOR(NSIO, 12, struct mnt_ns_info)
+
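A sketch of querying a mount namespace through the new ioctl; ns_fd is assumed to be an open /proc/<pid>/ns/mnt descriptor, and pre-setting .size is an assumption rather than a documented requirement:

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/nsfs.h>

    struct mnt_ns_info info = { .size = MNT_NS_INFO_SIZE_VER0 };

    if (ioctl(ns_fd, NS_MNT_GET_INFO, &info) == 0)
            printf("mnt_ns %llu: %u mounts\n",
                   (unsigned long long)info.mnt_ns_id, info.nr_mounts);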
#endif /* __LINUX_NSFS_H */
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 94c00996e633..12323b3334a9 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -634,9 +634,11 @@
#define PCI_EXP_RTCTL_SENFEE 0x0002 /* System Error on Non-Fatal Error */
#define PCI_EXP_RTCTL_SEFEE 0x0004 /* System Error on Fatal Error */
#define PCI_EXP_RTCTL_PMEIE 0x0008 /* PME Interrupt Enable */
-#define PCI_EXP_RTCTL_CRSSVE 0x0010 /* CRS Software Visibility Enable */
+#define PCI_EXP_RTCTL_RRS_SVE 0x0010 /* Config RRS Software Visibility Enable */
+#define PCI_EXP_RTCTL_CRSSVE PCI_EXP_RTCTL_RRS_SVE /* compatibility */
#define PCI_EXP_RTCAP 0x1e /* Root Capabilities */
-#define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */
+#define PCI_EXP_RTCAP_RRS_SV 0x0001 /* Config RRS Software Visibility */
+#define PCI_EXP_RTCAP_CRSVIS PCI_EXP_RTCAP_RRS_SV /* compatibility */
#define PCI_EXP_RTSTA 0x20 /* Root Status */
#define PCI_EXP_RTSTA_PME_RQ_ID 0x0000ffff /* PME Requester ID */
#define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */
@@ -740,6 +742,7 @@
#define PCI_EXT_CAP_ID_DVSEC 0x23 /* Designated Vendor-Specific */
#define PCI_EXT_CAP_ID_DLF 0x25 /* Data Link Feature */
#define PCI_EXT_CAP_ID_PL_16GT 0x26 /* Physical Layer 16.0 GT/s */
+#define PCI_EXT_CAP_ID_NPEM 0x29 /* Native PCIe Enclosure Management */
#define PCI_EXT_CAP_ID_PL_32GT 0x2A /* Physical Layer 32.0 GT/s */
#define PCI_EXT_CAP_ID_DOE 0x2E /* Data Object Exchange */
#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_DOE
@@ -1121,6 +1124,40 @@
#define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK 0x000000F0
#define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT 4
+/* Native PCIe Enclosure Management */
+#define PCI_NPEM_CAP 0x04 /* NPEM capability register */
+#define PCI_NPEM_CAP_CAPABLE 0x00000001 /* NPEM Capable */
+
+#define PCI_NPEM_CTRL 0x08 /* NPEM control register */
+#define PCI_NPEM_CTRL_ENABLE 0x00000001 /* NPEM Enable */
+
+/*
+ * The Native PCIe Enclosure Management indication bits and the Reset command
+ * bit are laid out identically in the capability and control registers.
+ */
+#define PCI_NPEM_CMD_RESET 0x00000002 /* Reset Command */
+#define PCI_NPEM_IND_OK 0x00000004 /* OK */
+#define PCI_NPEM_IND_LOCATE 0x00000008 /* Locate */
+#define PCI_NPEM_IND_FAIL 0x00000010 /* Fail */
+#define PCI_NPEM_IND_REBUILD 0x00000020 /* Rebuild */
+#define PCI_NPEM_IND_PFA 0x00000040 /* Predicted Failure Analysis */
+#define PCI_NPEM_IND_HOTSPARE 0x00000080 /* Hot Spare */
+#define PCI_NPEM_IND_ICA 0x00000100 /* In Critical Array */
+#define PCI_NPEM_IND_IFA 0x00000200 /* In Failed Array */
+#define PCI_NPEM_IND_IDT 0x00000400 /* Device Type */
+#define PCI_NPEM_IND_DISABLED 0x00000800 /* Disabled */
+#define PCI_NPEM_IND_SPEC_0 0x01000000
+#define PCI_NPEM_IND_SPEC_1 0x02000000
+#define PCI_NPEM_IND_SPEC_2 0x04000000
+#define PCI_NPEM_IND_SPEC_3 0x08000000
+#define PCI_NPEM_IND_SPEC_4 0x10000000
+#define PCI_NPEM_IND_SPEC_5 0x20000000
+#define PCI_NPEM_IND_SPEC_6 0x40000000
+#define PCI_NPEM_IND_SPEC_7 0x80000000
+
+#define PCI_NPEM_STATUS 0x0c /* NPEM status register */
+#define PCI_NPEM_STATUS_CC 0x00000001 /* Command Completed */
+
/* Data Object Exchange */
#define PCI_DOE_CAP 0x04 /* DOE Capabilities Register */
#define PCI_DOE_CAP_INT_SUP 0x00000001 /* Interrupt Support */
diff --git a/include/uapi/linux/rkisp1-config.h b/include/uapi/linux/rkisp1-config.h
index 6eeaf8bf2362..430daceafac7 100644
--- a/include/uapi/linux/rkisp1-config.h
+++ b/include/uapi/linux/rkisp1-config.h
@@ -165,6 +165,11 @@
#define RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS 6
/*
+ * Compand
+ */
+#define RKISP1_CIF_ISP_COMPAND_NUM_POINTS 64
+
+/*
* Measurement types
*/
#define RKISP1_CIF_ISP_STAT_AWB (1U << 0)
@@ -851,6 +856,39 @@ struct rkisp1_params_cfg {
struct rkisp1_cif_isp_isp_other_cfg others;
};
+/**
+ * struct rkisp1_cif_isp_compand_bls_config - Rockchip ISP1 Companding parameters (BLS)
+ * @r: Fixed subtraction value for Bayer pattern R
+ * @gr: Fixed subtraction value for Bayer pattern Gr
+ * @gb: Fixed subtraction value for Bayer pattern Gb
+ * @b: Fixed subtraction value for Bayer pattern B
+ *
+ * The values will be subtracted from the sensor values. Note that unlike the
+ * dedicated BLS block, the BLS values in the compander are 20-bit unsigned.
+ */
+struct rkisp1_cif_isp_compand_bls_config {
+ __u32 r;
+ __u32 gr;
+ __u32 gb;
+ __u32 b;
+};
+
+/**
+ * struct rkisp1_cif_isp_compand_curve_config - Rockchip ISP1 Companding
+ * parameters (expand and compression curves)
+ * @px: Compand curve x-values. Each value stores the distance from the
+ * previous x-value, expressed as log2 of the distance on 5 bits.
+ * @x: Compand curve x-values. The functionality of these parameters is
+ * unknown due to a lack of hardware documentation, but they are left
+ * here for future compatibility purposes.
+ * @y: Compand curve y-values
+ */
+struct rkisp1_cif_isp_compand_curve_config {
+ __u8 px[RKISP1_CIF_ISP_COMPAND_NUM_POINTS];
+ __u32 x[RKISP1_CIF_ISP_COMPAND_NUM_POINTS];
+ __u32 y[RKISP1_CIF_ISP_COMPAND_NUM_POINTS];
+};
+
/*---------- PART2: Measurement Statistics ------------*/
/**
@@ -996,4 +1034,544 @@ struct rkisp1_stat_buffer {
struct rkisp1_cif_isp_stat params;
};
+/*---------- PART3: Extensible Configuration Parameters ------------*/
+
+/**
+ * enum rkisp1_ext_params_block_type - RkISP1 extensible params block type
+ *
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS: Black level subtraction
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_DPCC: Defect pixel cluster correction
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_SDG: Sensor de-gamma
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_GAIN: Auto white balance gains
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_FLT: ISP filtering
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_BDM: Bayer de-mosaic
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_CTK: Cross-talk correction
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_GOC: Gamma out correction
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF: De-noise pre-filter
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF_STRENGTH: De-noise pre-filter strength
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_CPROC: Color processing
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_IE: Image effects
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_LSC: Lens shading correction
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_MEAS: Auto white balance statistics
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_HST_MEAS: Histogram statistics
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_AEC_MEAS: Auto exposure statistics
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_AFC_MEAS: Auto-focus statistics
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_BLS: BLS in the compand block
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_EXPAND: Companding expand curve
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_COMPRESS: Companding compress curve
+ */
+enum rkisp1_ext_params_block_type {
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_DPCC,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_SDG,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_GAIN,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_FLT,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_BDM,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_CTK,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_GOC,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF_STRENGTH,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_CPROC,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_IE,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_LSC,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_MEAS,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_HST_MEAS,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_AEC_MEAS,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_AFC_MEAS,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_BLS,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_EXPAND,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_COMPRESS,
+};
+
+#define RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE (1U << 0)
+#define RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE (1U << 1)
+
+/**
+ * struct rkisp1_ext_params_block_header - RkISP1 extensible parameters block
+ * header
+ *
+ * This structure represents the common part of all the ISP configuration
+ * blocks. Each parameters block shall embed an instance of this structure type
+ * as its first member, followed by the block-specific configuration data. The
+ * driver inspects this common header to discern the block type and its size and
+ * properly handle the block content by casting it to the correct block-specific
+ * type.
+ *
+ * The @type field is one of the values enumerated by
+ * :c:type:`rkisp1_ext_params_block_type` and specifies how the data should be
+ * interpreted by the driver. The @size field specifies the size of the
+ * parameters block and is used by the driver for validation purposes.
+ *
+ * The @flags field is a bitmask of per-block flags RKISP1_EXT_PARAMS_FL_*.
+ *
+ * When userspace wants to configure and enable an ISP block it shall fully
+ * populate the block configuration and set the
+ * RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE bit in the @flags field.
+ *
+ * When userspace simply wants to disable an ISP block the
+ * RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE bit should be set in @flags field. The
+ * driver ignores the rest of the block configuration structure in this case.
+ *
+ * If a new configuration of an ISP block has to be applied userspace shall
+ * fully populate the ISP block configuration and omit setting the
+ * RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE and RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE bits
+ * in the @flags field.
+ *
+ * Setting both the RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE and
+ * RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE bits in the @flags field is not allowed
+ * and not accepted by the driver.
+ *
+ * Userspace is responsible for correctly populating the parameters block header
+ * fields (@type, @flags and @size) and the block-specific parameters.
+ *
+ * For example:
+ *
+ * .. code-block:: c
+ *
+ * void populate_bls(struct rkisp1_ext_params_block_header *block) {
+ * struct rkisp1_ext_params_bls_config *bls =
+ * (struct rkisp1_ext_params_bls_config *)block;
+ *
+ * bls->header.type = RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS;
+ * bls->header.flags = RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE;
+ * bls->header.size = sizeof(*bls);
+ *
+ * bls->config.enable_auto = 0;
+ * bls->config.fixed_val.r = blackLevelRed_;
+ * bls->config.fixed_val.gr = blackLevelGreenR_;
+ * bls->config.fixed_val.gb = blackLevelGreenB_;
+ * bls->config.fixed_val.b = blackLevelBlue_;
+ * }
+ *
+ * @type: The parameters block type, see
+ * :c:type:`rkisp1_ext_params_block_type`
+ * @flags: A bitmask of block flags
+ * @size: Size (in bytes) of the parameters block, including this header
+ */
+struct rkisp1_ext_params_block_header {
+ __u16 type;
+ __u16 flags;
+ __u32 size;
+};
+
+/**
+ * struct rkisp1_ext_params_bls_config - RkISP1 extensible params BLS config
+ *
+ * RkISP1 extensible parameters Black Level Subtraction configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Black Level Subtraction configuration, see
+ * :c:type:`rkisp1_cif_isp_bls_config`
+ */
+struct rkisp1_ext_params_bls_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_bls_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_dpcc_config - RkISP1 extensible params DPCC config
+ *
+ * RkISP1 extensible parameters Defective Pixel Cluster Correction configuration
+ * block. Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_DPCC`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Defective Pixel Cluster Correction configuration, see
+ * :c:type:`rkisp1_cif_isp_dpcc_config`
+ */
+struct rkisp1_ext_params_dpcc_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_dpcc_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_sdg_config - RkISP1 extensible params SDG config
+ *
+ * RkISP1 extensible parameters Sensor Degamma configuration block. Identified
+ * by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_SDG`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Sensor Degamma configuration, see
+ * :c:type:`rkisp1_cif_isp_sdg_config`
+ */
+struct rkisp1_ext_params_sdg_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_sdg_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_lsc_config - RkISP1 extensible params LSC config
+ *
+ * RkISP1 extensible parameters Lens Shading Correction configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_LSC`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Lens Shading Correction configuration, see
+ * :c:type:`rkisp1_cif_isp_lsc_config`
+ */
+struct rkisp1_ext_params_lsc_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_lsc_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_awb_gain_config - RkISP1 extensible params AWB
+ * gain config
+ *
+ * RkISP1 extensible parameters Auto-White Balance Gains configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_GAIN`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Auto-White Balance Gains configuration, see
+ * :c:type:`rkisp1_cif_isp_awb_gain_config`
+ */
+struct rkisp1_ext_params_awb_gain_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_awb_gain_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_flt_config - RkISP1 extensible params FLT config
+ *
+ * RkISP1 extensible parameters Filter configuration block. Identified by
+ * :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_FLT`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Filter configuration, see :c:type:`rkisp1_cif_isp_flt_config`
+ */
+struct rkisp1_ext_params_flt_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_flt_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_bdm_config - RkISP1 extensible params BDM config
+ *
+ * RkISP1 extensible parameters Demosaicing configuration block. Identified by
+ * :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_BDM`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Demosaicing configuration, see :c:type:`rkisp1_cif_isp_bdm_config`
+ */
+struct rkisp1_ext_params_bdm_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_bdm_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_ctk_config - RkISP1 extensible params CTK config
+ *
+ * RkISP1 extensible parameters Cross-Talk configuration block. Identified by
+ * :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_CTK`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Cross-Talk configuration, see :c:type:`rkisp1_cif_isp_ctk_config`
+ */
+struct rkisp1_ext_params_ctk_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_ctk_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_goc_config - RkISP1 extensible params GOC config
+ *
+ * RkISP1 extensible parameters Gamma-Out configuration block. Identified by
+ * :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_GOC`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Gamma-Out configuration, see :c:type:`rkisp1_cif_isp_goc_config`
+ */
+struct rkisp1_ext_params_goc_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_goc_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_dpf_config - RkISP1 extensible params DPF config
+ *
+ * RkISP1 extensible parameters De-noise Pre-Filter configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: De-noise Pre-Filter configuration, see
+ * :c:type:`rkisp1_cif_isp_dpf_config`
+ */
+struct rkisp1_ext_params_dpf_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_dpf_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_dpf_strength_config - RkISP1 extensible params DPF
+ * strength config
+ *
+ * RkISP1 extensible parameters De-noise Pre-Filter strength configuration
+ * block. Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF_STRENGTH`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: De-noise Pre-Filter strength configuration, see
+ * :c:type:`rkisp1_cif_isp_dpf_strength_config`
+ */
+struct rkisp1_ext_params_dpf_strength_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_dpf_strength_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_cproc_config - RkISP1 extensible params CPROC config
+ *
+ * RkISP1 extensible parameters Color Processing configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_CPROC`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Color processing configuration, see
+ * :c:type:`rkisp1_cif_isp_cproc_config`
+ */
+struct rkisp1_ext_params_cproc_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_cproc_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_ie_config - RkISP1 extensible params IE config
+ *
+ * RkISP1 extensible parameters Image Effect configuration block. Identified by
+ * :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_IE`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Image Effect configuration, see :c:type:`rkisp1_cif_isp_ie_config`
+ */
+struct rkisp1_ext_params_ie_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_ie_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_awb_meas_config - RkISP1 extensible params AWB
+ * Meas config
+ *
+ * RkISP1 extensible parameters Auto-White Balance Measurement configuration
+ * block. Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_MEAS`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Auto-White Balance measure configuration, see
+ * :c:type:`rkisp1_cif_isp_awb_meas_config`
+ */
+struct rkisp1_ext_params_awb_meas_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_awb_meas_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_hst_config - RkISP1 extensible params Histogram config
+ *
+ * RkISP1 extensible parameters Histogram statistics configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_HST_MEAS`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Histogram statistics configuration, see
+ * :c:type:`rkisp1_cif_isp_hst_config`
+ */
+struct rkisp1_ext_params_hst_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_hst_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_aec_config - RkISP1 extensible params AEC config
+ *
+ * RkISP1 extensible parameters Auto-Exposure statistics configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_AEC_MEAS`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Auto-Exposure statistics configuration, see
+ * :c:type:`rkisp1_cif_isp_aec_config`
+ */
+struct rkisp1_ext_params_aec_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_aec_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_afc_config - RkISP1 extensible params AFC config
+ *
+ * RkISP1 extensible parameters Auto-Focus statistics configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_AFC_MEAS`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Auto-Focus statistics configuration, see
+ * :c:type:`rkisp1_cif_isp_afc_config`
+ */
+struct rkisp1_ext_params_afc_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_afc_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_compand_bls_config - RkISP1 extensible params
+ * Compand BLS config
+ *
+ * RkISP1 extensible parameters Companding configuration block (black level
+ * subtraction). Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_BLS`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Companding BLS configuration, see
+ * :c:type:`rkisp1_cif_isp_compand_bls_config`
+ */
+struct rkisp1_ext_params_compand_bls_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_compand_bls_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_compand_curve_config - RkISP1 extensible params
+ * Compand curve config
+ *
+ * RkISP1 extensible parameters Companding configuration block (expand and
+ * compression curves). Identified by
+ * :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_EXPAND` or
+ * :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_COMPRESS`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Companding curve configuration, see
+ * :c:type:`rkisp1_cif_isp_compand_curve_config`
+ */
+struct rkisp1_ext_params_compand_curve_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_compand_curve_config config;
+} __attribute__((aligned(8)));
+
+/*
+ * The rkisp1_ext_params_compand_curve_config structure is counted twice as it
+ * is used for both the COMPAND_EXPAND and COMPAND_COMPRESS block types.
+ */
+#define RKISP1_EXT_PARAMS_MAX_SIZE \
+ (sizeof(struct rkisp1_ext_params_bls_config) +\
+ sizeof(struct rkisp1_ext_params_dpcc_config) +\
+ sizeof(struct rkisp1_ext_params_sdg_config) +\
+ sizeof(struct rkisp1_ext_params_lsc_config) +\
+ sizeof(struct rkisp1_ext_params_awb_gain_config) +\
+ sizeof(struct rkisp1_ext_params_flt_config) +\
+ sizeof(struct rkisp1_ext_params_bdm_config) +\
+ sizeof(struct rkisp1_ext_params_ctk_config) +\
+ sizeof(struct rkisp1_ext_params_goc_config) +\
+ sizeof(struct rkisp1_ext_params_dpf_config) +\
+ sizeof(struct rkisp1_ext_params_dpf_strength_config) +\
+ sizeof(struct rkisp1_ext_params_cproc_config) +\
+ sizeof(struct rkisp1_ext_params_ie_config) +\
+ sizeof(struct rkisp1_ext_params_awb_meas_config) +\
+ sizeof(struct rkisp1_ext_params_hst_config) +\
+ sizeof(struct rkisp1_ext_params_aec_config) +\
+ sizeof(struct rkisp1_ext_params_afc_config) +\
+ sizeof(struct rkisp1_ext_params_compand_bls_config) +\
+ sizeof(struct rkisp1_ext_params_compand_curve_config) +\
+ sizeof(struct rkisp1_ext_params_compand_curve_config))
+
+/**
+ * enum rksip1_ext_param_buffer_version - RkISP1 extensible parameters version
+ *
+ * @RKISP1_EXT_PARAM_BUFFER_V1: First version of RkISP1 extensible parameters
+ */
+enum rksip1_ext_param_buffer_version {
+ RKISP1_EXT_PARAM_BUFFER_V1 = 1,
+};
+
+/**
+ * struct rkisp1_ext_params_cfg - RkISP1 extensible parameters configuration
+ *
+ * This struct contains the configuration parameters of the RkISP1 ISP
+ * algorithms, serialized by userspace into a data buffer. Each configuration
+ * parameter block is represented by a block-specific structure which contains a
+ * :c:type:`rkisp1_ext_params_block_header` entry as first member. Userspace
+ * populates the @data buffer with configuration parameters for the blocks that
+ * it intends to configure. As a consequence, the data buffer effective size
+ * changes according to the number of ISP blocks that userspace intends to
+ * configure and is set by userspace in the @data_size field.
+ *
+ * The parameters buffer is versioned by the @version field to allow modifying
+ * and extending its definition. Userspace shall populate the @version field to
+ * inform the driver about the version it intends to use. The driver will parse
+ * and handle the @data buffer according to the data layout specific to the
+ * indicated version and return an error if the desired version is not
+ * supported.
+ *
+ * Currently the single RKISP1_EXT_PARAM_BUFFER_V1 version is supported.
+ * When a new format version is added, a mechanism for userspace to query
+ * the supported format versions will be implemented in the form of a read-only
+ * V4L2 control. If such a control is not available, userspace should assume only
+ * RKISP1_EXT_PARAM_BUFFER_V1 is supported by the driver.
+ *
+ * For each ISP block that userspace wants to configure, a block-specific
+ * structure is appended to the @data buffer, one after the other without gaps
+ * in between nor overlaps. Userspace shall populate the @data_size field with
+ * the effective size, in bytes, of the @data buffer.
+ *
+ * The expected memory layout of the parameters buffer is::
+ *
+ * +-------------------- struct rkisp1_ext_params_cfg -------------------+
+ * | version = RKISP1_EXT_PARAM_BUFFER_V1; |
+ * | data_size = sizeof(struct rkisp1_ext_params_bls_config) |
+ * | + sizeof(struct rkisp1_ext_params_dpcc_config); |
+ * | +------------------------- data ---------------------------------+ |
+ * | | +------------- struct rkisp1_ext_params_bls_config -----------+ | |
+ * | | | +-------- struct rkisp1_ext_params_block_header ---------+ | | |
+ * | | | | type = RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS; | | | |
+ * | | | | flags = RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE; | | | |
+ * | | | | size = sizeof(struct rkisp1_ext_params_bls_config); | | | |
+ * | | | +---------------------------------------------------------+ | | |
+ * | | | +---------- struct rkisp1_cif_isp_bls_config -------------+ | | |
+ * | | | | enable_auto = 0; | | | |
+ * | | | | fixed_val.r = 256; | | | |
+ * | | | | fixed_val.gr = 256; | | | |
+ * | | | | fixed_val.gb = 256; | | | |
+ * | | | | fixed_val.b = 256; | | | |
+ * | | | +---------------------------------------------------------+ | | |
+ * | | +------------ struct rkisp1_ext_params_dpcc_config -----------+ | |
+ * | | | +-------- struct rkisp1_ext_params_block_header ---------+ | | |
+ * | | | | type = RKISP1_EXT_PARAMS_BLOCK_TYPE_DPCC; | | | |
+ * | | | | flags = RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE; | | | |
+ * | | | | size = sizeof(struct rkisp1_ext_params_dpcc_config); | | | |
+ * | | | +---------------------------------------------------------+ | | |
+ * | | | +---------- struct rkisp1_cif_isp_dpcc_config ------------+ | | |
+ * | | | | mode = RKISP1_CIF_ISP_DPCC_MODE_STAGE1_ENABLE; | | | |
+ * | | | | output_mode = | | | |
+ * | | | | RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_INCL_G_CENTER; | | | |
+ * | | | | set_use = ... ; | | | |
+ * | | | | ... = ... ; | | | |
+ * | | | +---------------------------------------------------------+ | | |
+ * | | +-------------------------------------------------------------+ | |
+ * | +-----------------------------------------------------------------+ |
+ * +---------------------------------------------------------------------+
+ *
+ * @version: The RkISP1 extensible parameters buffer version, see
+ * :c:type:`rksip1_ext_param_buffer_version`
+ * @data_size: The RkISP1 configuration data effective size, excluding this
+ * header
+ * @data: The RkISP1 extensible configuration data blocks
+ */
+struct rkisp1_ext_params_cfg {
+ __u32 version;
+ __u32 data_size;
+ __u8 data[RKISP1_EXT_PARAMS_MAX_SIZE];
+};
+
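A userspace sketch of assembling the buffer described above with a single BLS block; the black-level values are illustrative:

    #include <string.h>
    #include <linux/rkisp1-config.h>

    struct rkisp1_ext_params_cfg cfg = {
            .version = RKISP1_EXT_PARAM_BUFFER_V1,
    };
    struct rkisp1_ext_params_bls_config bls = {
            .header = {
                    .type  = RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS,
                    .flags = RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE,
                    .size  = sizeof(bls),
            },
            .config = {
                    .fixed_val = { .r = 256, .gr = 256, .gb = 256, .b = 256 },
            },
    };

    memcpy(cfg.data, &bls, sizeof(bls));
    cfg.data_size = sizeof(bls);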
#endif /* _UAPI_RKISP1_CONFIG_H */
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index 3bac0a8ceab2..359a14cc76a4 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -118,6 +118,7 @@ struct clone_args {
/* SCHED_ISO: reserved but not implemented yet */
#define SCHED_IDLE 5
#define SCHED_DEADLINE 6
+#define SCHED_EXT 7
/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
#define SCHED_RESET_ON_FORK 0x40000000
diff --git a/include/uapi/linux/sched/types.h b/include/uapi/linux/sched/types.h
index 90662385689b..bf6e9ae031c1 100644
--- a/include/uapi/linux/sched/types.h
+++ b/include/uapi/linux/sched/types.h
@@ -58,9 +58,9 @@
*
* This is reflected by the following fields of the sched_attr structure:
*
- * @sched_deadline representative of the task's deadline
- * @sched_runtime representative of the task's runtime
- * @sched_period representative of the task's period
+ * @sched_deadline representative of the task's deadline in nanoseconds
+ * @sched_runtime representative of the task's runtime in nanoseconds
+ * @sched_period representative of the task's period in nanoseconds
*
* Given this task model, there are a multiplicity of scheduling algorithms
* and policies, that can be used to ensure all the tasks will make their
diff --git a/include/uapi/linux/serio.h b/include/uapi/linux/serio.h
index ed2a96f43ce4..5a2af0942c9f 100644
--- a/include/uapi/linux/serio.h
+++ b/include/uapi/linux/serio.h
@@ -83,5 +83,6 @@
#define SERIO_PULSE8_CEC 0x40
#define SERIO_RAINSHADOW_CEC 0x41
#define SERIO_FSIA6B 0x42
+#define SERIO_EXTRON_DA_HD_4K_PLUS 0x43
#endif /* _UAPI_SERIO_H */
diff --git a/include/uapi/linux/spi/spi.h b/include/uapi/linux/spi/spi.h
index ca56e477d161..ee4ac812b8f8 100644
--- a/include/uapi/linux/spi/spi.h
+++ b/include/uapi/linux/spi/spi.h
@@ -28,7 +28,8 @@
#define SPI_RX_OCTAL _BITUL(14) /* receive with 8 wires */
#define SPI_3WIRE_HIZ _BITUL(15) /* high impedance turnaround */
#define SPI_RX_CPHA_FLIP _BITUL(16) /* flip CPHA on Rx only xfer */
-#define SPI_MOSI_IDLE_LOW _BITUL(17) /* leave mosi line low when idle */
+#define SPI_MOSI_IDLE_LOW _BITUL(17) /* leave MOSI line low when idle */
+#define SPI_MOSI_IDLE_HIGH _BITUL(18) /* leave MOSI line high when idle */
/*
* All the bits defined above should be covered by SPI_MODE_USER_MASK.
@@ -38,6 +39,6 @@
* These bits must not overlap. A static assert check should make sure of that.
* If adding extra bits, make sure to increase the bit index below as well.
*/
-#define SPI_MODE_USER_MASK (_BITUL(18) - 1)
+#define SPI_MODE_USER_MASK (_BITUL(19) - 1)
#endif /* _UAPI_SPI_H */
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index 44d73ba8788d..91f0f7e214a5 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -254,6 +254,9 @@ struct usb_ctrlrequest {
#define USB_DT_DEVICE_CAPABILITY 0x10
#define USB_DT_WIRELESS_ENDPOINT_COMP 0x11
#define USB_DT_WIRE_ADAPTER 0x21
+/* From USB Device Firmware Upgrade Specification, Revision 1.1 */
+#define USB_DT_DFU_FUNCTIONAL 0x21
+/* these are from the Wireless USB spec */
#define USB_DT_RPIPE 0x22
#define USB_DT_CS_RADIO_CONTROL 0x23
/* From the T10 UAS specification */
@@ -329,9 +332,10 @@ struct usb_device_descriptor {
#define USB_CLASS_USB_TYPE_C_BRIDGE 0x12
#define USB_CLASS_MISC 0xef
#define USB_CLASS_APP_SPEC 0xfe
-#define USB_CLASS_VENDOR_SPEC 0xff
+#define USB_SUBCLASS_DFU 0x01
-#define USB_SUBCLASS_VENDOR_SPEC 0xff
+#define USB_CLASS_VENDOR_SPEC 0xff
+#define USB_SUBCLASS_VENDOR_SPEC 0xff
/*-------------------------------------------------------------------------*/
diff --git a/include/uapi/linux/usb/functionfs.h b/include/uapi/linux/usb/functionfs.h
index 9f88de9c3d66..2ebdba111a8f 100644
--- a/include/uapi/linux/usb/functionfs.h
+++ b/include/uapi/linux/usb/functionfs.h
@@ -3,6 +3,7 @@
#define _UAPI__LINUX_FUNCTIONFS_H__
+#include <linux/const.h>
#include <linux/types.h>
#include <linux/ioctl.h>
@@ -37,6 +38,31 @@ struct usb_endpoint_descriptor_no_audio {
__u8 bInterval;
} __attribute__((packed));
+/**
+ * struct usb_dfu_functional_descriptor - DFU Functional descriptor
+ * @bLength: Size of the descriptor (bytes)
+ * @bDescriptorType: USB_DT_DFU_FUNCTIONAL
+ * @bmAttributes: DFU attributes
+ * @wDetachTimeOut: Maximum time to wait after DFU_DETACH (ms, le16)
+ * @wTransferSize: Maximum number of bytes per control-write (le16)
+ * @bcdDFUVersion: DFU Spec version (BCD, le16)
+ */
+struct usb_dfu_functional_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bmAttributes;
+ __le16 wDetachTimeOut;
+ __le16 wTransferSize;
+ __le16 bcdDFUVersion;
+} __attribute__ ((packed));
+
+/* from DFU functional descriptor bmAttributes */
+#define DFU_FUNC_ATT_CAN_DOWNLOAD _BITUL(0)
+#define DFU_FUNC_ATT_CAN_UPLOAD _BITUL(1)
+#define DFU_FUNC_ATT_MANIFEST_TOLERANT _BITUL(2)
+#define DFU_FUNC_ATT_WILL_DETACH _BITUL(3)
+
+
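A sketch of a DFU functional descriptor a gadget might expose through functionfs; the timeout, transfer size and version values are illustrative, and on big-endian hosts the 16-bit fields would need cpu_to_le16-style conversion:

    static const struct usb_dfu_functional_descriptor dfu_func = {
            .bLength         = sizeof(struct usb_dfu_functional_descriptor),
            .bDescriptorType = USB_DT_DFU_FUNCTIONAL,
            .bmAttributes    = DFU_FUNC_ATT_CAN_DOWNLOAD |
                               DFU_FUNC_ATT_WILL_DETACH,
            .wDetachTimeOut  = 255,         /* ms */
            .wTransferSize   = 4096,
            .bcdDFUVersion   = 0x0110,      /* DFU 1.1 */
    };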
struct usb_functionfs_descs_head_v2 {
__le32 magic;
__le32 length;
@@ -104,23 +130,38 @@ struct usb_ffs_dmabuf_transfer_req {
#ifndef __KERNEL__
-/*
+/**
+ * DOC: descriptors
+ *
* Descriptors format:
*
+ * +-----+-----------+--------------+--------------------------------------+
* | off | name | type | description |
- * |-----+-----------+--------------+--------------------------------------|
+ * +-----+-----------+--------------+--------------------------------------+
* | 0 | magic | LE32 | FUNCTIONFS_DESCRIPTORS_MAGIC_V2 |
+ * +-----+-----------+--------------+--------------------------------------+
* | 4 | length | LE32 | length of the whole data chunk |
+ * +-----+-----------+--------------+--------------------------------------+
* | 8 | flags | LE32 | combination of functionfs_flags |
+ * +-----+-----------+--------------+--------------------------------------+
* | | eventfd | LE32 | eventfd file descriptor |
+ * +-----+-----------+--------------+--------------------------------------+
* | | fs_count | LE32 | number of full-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | | hs_count | LE32 | number of high-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | | ss_count | LE32 | number of super-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | | os_count | LE32 | number of MS OS descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | | fs_descrs | Descriptor[] | list of full-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | | hs_descrs | Descriptor[] | list of high-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | | ss_descrs | Descriptor[] | list of super-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | | os_descrs | OSDesc[] | list of MS OS descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
*
* Depending on which flags are set, various fields may be missing in the
* structure. Any flags that are not recognised cause the whole block to be
@@ -128,71 +169,111 @@ struct usb_ffs_dmabuf_transfer_req {
*
* Legacy descriptors format (deprecated as of 3.14):
*
+ * +-----+-----------+--------------+--------------------------------------+
* | off | name | type | description |
- * |-----+-----------+--------------+--------------------------------------|
+ * +-----+-----------+--------------+--------------------------------------+
* | 0 | magic | LE32 | FUNCTIONFS_DESCRIPTORS_MAGIC |
+ * +-----+-----------+--------------+--------------------------------------+
* | 4 | length | LE32 | length of the whole data chunk |
+ * +-----+-----------+--------------+--------------------------------------+
* | 8 | fs_count | LE32 | number of full-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | 12 | hs_count | LE32 | number of high-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | 16 | fs_descrs | Descriptor[] | list of full-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | | hs_descrs | Descriptor[] | list of high-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
*
* All numbers must be in little endian order.
*
* Descriptor[] is an array of valid USB descriptors which have the following
* format:
*
+ * +-----+-----------------+------+--------------------------+
* | off | name | type | description |
- * |-----+-----------------+------+--------------------------|
+ * +-----+-----------------+------+--------------------------+
* | 0 | bLength | U8 | length of the descriptor |
+ * +-----+-----------------+------+--------------------------+
* | 1 | bDescriptorType | U8 | descriptor type |
+ * +-----+-----------------+------+--------------------------+
* | 2 | payload | | descriptor's payload |
+ * +-----+-----------------+------+--------------------------+
*
* OSDesc[] is an array of valid MS OS Feature Descriptors which have one of
* the following formats:
*
+ * +-----+-----------------+------+--------------------------+
* | off | name | type | description |
- * |-----+-----------------+------+--------------------------|
+ * +-----+-----------------+------+--------------------------+
+ * | 0 | interface | U8 | related interface number |
+ * +-----+-----------------+------+--------------------------+
* | 1 | dwLength | U32 | length of the descriptor |
+ * +-----+-----------------+------+--------------------------+
* | 5 | bcdVersion | U16 | currently supported: 1 |
+ * +-----+-----------------+------+--------------------------+
* | 7 | wIndex | U16 | currently supported: 4 |
+ * +-----+-----------------+------+--------------------------+
* | 9 | bCount | U8 | number of ext. compat. |
+ * +-----+-----------------+------+--------------------------+
* | 10 | Reserved | U8 | 0 |
+ * +-----+-----------------+------+--------------------------+
* | 11 | ExtCompat[] | | list of ext. compat. d. |
+ * +-----+-----------------+------+--------------------------+
*
+ * +-----+-----------------+------+--------------------------+
* | off | name | type | description |
- * |-----+-----------------+------+--------------------------|
+ * +-----+-----------------+------+--------------------------+
+ * | 0 | interface | U8 | related interface number |
+ * +-----+-----------------+------+--------------------------+
* | 1 | dwLength | U32 | length of the descriptor |
+ * +-----+-----------------+------+--------------------------+
* | 5 | bcdVersion | U16 | currently supported: 1 |
+ * +-----+-----------------+------+--------------------------+
* | 7 | wIndex | U16 | currently supported: 5 |
+ * +-----+-----------------+------+--------------------------+
* | 9 | wCount | U16 | number of ext. compat. |
+ * +-----+-----------------+------+--------------------------+
* | 11 | ExtProp[] | | list of ext. prop. d. |
+ * +-----+-----------------+------+--------------------------+
*
 * ExtCompat[] is an array of valid Extended Compatibility descriptors
* which have the following format:
*
+ * +-----+-----------------------+------+-------------------------------------+
* | off | name | type | description |
- * |-----+-----------------------+------+-------------------------------------|
+ * +-----+-----------------------+------+-------------------------------------+
* | 0 | bFirstInterfaceNumber | U8 | index of the interface or of the 1st|
+ * +-----+-----------------------+------+-------------------------------------+
* | | | | interface in an IAD group |
+ * +-----+-----------------------+------+-------------------------------------+
* | 1 | Reserved | U8 | 1 |
+ * +-----+-----------------------+------+-------------------------------------+
* | 2 | CompatibleID | U8[8]| compatible ID string |
+ * +-----+-----------------------+------+-------------------------------------+
* | 10 | SubCompatibleID | U8[8]| subcompatible ID string |
+ * +-----+-----------------------+------+-------------------------------------+
* | 18 | Reserved | U8[6]| 0 |
+ * +-----+-----------------------+------+-------------------------------------+
*
* ExtProp[] is an array of valid Extended Properties descriptors
* which have the following format:
*
+ * +-----+-----------------------+------+-------------------------------------+
* | off | name | type | description |
- * |-----+-----------------------+------+-------------------------------------|
+ * +-----+-----------------------+------+-------------------------------------+
* | 0 | dwSize | U32 | length of the descriptor |
+ * +-----+-----------------------+------+-------------------------------------+
* | 4 | dwPropertyDataType | U32 | 1..7 |
+ * +-----+-----------------------+------+-------------------------------------+
* | 8 | wPropertyNameLength | U16 | bPropertyName length (NL) |
+ * +-----+-----------------------+------+-------------------------------------+
* | 10 | bPropertyName |U8[NL]| name of this property |
+ * +-----+-----------------------+------+-------------------------------------+
* |10+NL| dwPropertyDataLength | U32 | bPropertyData length (DL) |
+ * +-----+-----------------------+------+-------------------------------------+
* |14+NL| bProperty |U8[DL]| payload of this property |
+ * +-----+-----------------------+------+-------------------------------------+
*/
struct usb_functionfs_strings_head {
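
The DFU functional descriptor defined above can be embedded in a FunctionFS descriptor blob. A hedged userspace sketch of filling it; the timeout, transfer size and version are illustrative values:

#include <endian.h>
#include <string.h>
#include <linux/usb/ch9.h>
#include <linux/usb/functionfs.h>

static void fill_dfu_functional(struct usb_dfu_functional_descriptor *d)
{
	memset(d, 0, sizeof(*d));
	d->bLength	   = sizeof(*d);
	d->bDescriptorType = USB_DT_DFU_FUNCTIONAL;
	d->bmAttributes	   = DFU_FUNC_ATT_CAN_DOWNLOAD | DFU_FUNC_ATT_WILL_DETACH;
	d->wDetachTimeOut  = htole16(1000);	/* ms */
	d->wTransferSize   = htole16(4096);	/* bytes per control-write */
	d->bcdDFUVersion   = htole16(0x0110);	/* DFU 1.1 */
}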
diff --git a/include/uapi/linux/usb/g_hid.h b/include/uapi/linux/usb/g_hid.h
new file mode 100644
index 000000000000..b965092db476
--- /dev/null
+++ b/include/uapi/linux/usb/g_hid.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+
+#ifndef __UAPI_LINUX_USB_G_HID_H
+#define __UAPI_LINUX_USB_G_HID_H
+
+#include <linux/types.h>
+
+/* Maximum HID report length for High-Speed USB (i.e. USB 2.0) */
+#define MAX_REPORT_LENGTH 64
+
+/**
+ * struct usb_hidg_report - response to GET_REPORT
+ * @report_id: report ID that this is a response for
+ * @userspace_req:
+ * !0 this report is used for any pending GET_REPORT request
+ * but wait on userspace to issue a new report on future requests
+ * 0 this report is to be used for any future GET_REPORT requests
+ * @length: length of the report response
+ * @data: report response
+ * @padding: padding for 32/64 bit compatibility
+ *
+ * Structure used by GADGET_HID_WRITE_GET_REPORT ioctl on /dev/hidg*.
+ */
+struct usb_hidg_report {
+ __u8 report_id;
+ __u8 userspace_req;
+ __u16 length;
+ __u8 data[MAX_REPORT_LENGTH];
+ __u8 padding[4];
+};
+
+/* The 'g' code is used by gadgetfs and hid gadget ioctl requests.
+ * Don't add any colliding codes to either driver, and keep
+ * them in unique ranges.
+ */
+
+#define GADGET_HID_READ_GET_REPORT_ID _IOR('g', 0x41, __u8)
+#define GADGET_HID_WRITE_GET_REPORT _IOW('g', 0x42, struct usb_hidg_report)
+
+#endif /* __UAPI_LINUX_USB_G_HID_H */
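
A gadget-side userspace sketch of the new GET_REPORT handshake, assuming a configured HID function exposed at /dev/hidg0 and opened by the caller:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/usb/g_hid.h>

static int answer_get_report(int hidg_fd)
{
	struct usb_hidg_report rep;
	__u8 id;

	if (ioctl(hidg_fd, GADGET_HID_READ_GET_REPORT_ID, &id) < 0)
		return -1;

	memset(&rep, 0, sizeof(rep));
	rep.report_id	  = id;
	rep.userspace_req = 1;		/* satisfy only the pending request */
	rep.length	  = 2;		/* illustrative two-byte report */
	rep.data[0]	  = 0x01;
	rep.data[1]	  = 0x02;

	return ioctl(hidg_fd, GADGET_HID_WRITE_GET_REPORT, &rep);
}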
diff --git a/include/uapi/linux/usb/gadgetfs.h b/include/uapi/linux/usb/gadgetfs.h
index 835473910a49..9754822b2a40 100644
--- a/include/uapi/linux/usb/gadgetfs.h
+++ b/include/uapi/linux/usb/gadgetfs.h
@@ -62,7 +62,7 @@ struct usb_gadgetfs_event {
};
-/* The 'g' code is also used by printer gadget ioctl requests.
+/* The 'g' code is also used by printer and hid gadget ioctl requests.
* Don't add any colliding codes to either driver, and keep
* them in unique ranges (size 0x20 for now).
*/
diff --git a/include/uapi/linux/vbox_vmmdev_types.h b/include/uapi/linux/vbox_vmmdev_types.h
index f8a8d6b3c521..6073858d52a2 100644
--- a/include/uapi/linux/vbox_vmmdev_types.h
+++ b/include/uapi/linux/vbox_vmmdev_types.h
@@ -282,7 +282,10 @@ struct vmmdev_hgcm_pagelist {
__u32 flags; /** VMMDEV_HGCM_F_PARM_*. */
__u16 offset_first_page; /** Data offset in the first page. */
__u16 page_count; /** Number of pages. */
- __u64 pages[1]; /** Page addresses. */
+ union {
+ __u64 unused; /** Deprecated place-holder for first "pages" entry. */
+ __DECLARE_FLEX_ARRAY(__u64, pages); /** Page addresses. */
+ };
};
VMMDEV_ASSERT_SIZE(vmmdev_hgcm_pagelist, 4 + 2 + 2 + 8);
diff --git a/include/uapi/linux/vdpa.h b/include/uapi/linux/vdpa.h
index 842bf1201ac4..71edf2c70cc3 100644
--- a/include/uapi/linux/vdpa.h
+++ b/include/uapi/linux/vdpa.h
@@ -19,6 +19,7 @@ enum vdpa_command {
VDPA_CMD_DEV_GET, /* can dump */
VDPA_CMD_DEV_CONFIG_GET, /* can dump */
VDPA_CMD_DEV_VSTATS_GET,
+ VDPA_CMD_DEV_ATTR_SET,
};
enum vdpa_attr {
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 4e91362da6da..27239cb64065 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -502,6 +502,7 @@ struct v4l2_capability {
#define V4L2_CAP_META_CAPTURE 0x00800000 /* Is a metadata capture device */
#define V4L2_CAP_READWRITE 0x01000000 /* read/write system calls */
+#define V4L2_CAP_EDID 0x02000000 /* Is an EDID-only device */
#define V4L2_CAP_STREAMING 0x04000000 /* streaming I/O ioctls */
#define V4L2_CAP_META_OUTPUT 0x08000000 /* Is a metadata output device */
@@ -854,6 +855,7 @@ struct v4l2_pix_format {
/* Vendor specific - used for RK_ISP1 camera sub-system */
#define V4L2_META_FMT_RK_ISP1_PARAMS v4l2_fourcc('R', 'K', '1', 'P') /* Rockchip ISP1 3A Parameters */
#define V4L2_META_FMT_RK_ISP1_STAT_3A v4l2_fourcc('R', 'K', '1', 'S') /* Rockchip ISP1 3A Statistics */
+#define V4L2_META_FMT_RK_ISP1_EXT_PARAMS v4l2_fourcc('R', 'K', '1', 'E') /* Rockchip ISP1 3A Extensible Parameters */
/* Vendor specific - used for RaspberryPi PiSP */
#define V4L2_META_FMT_RPI_BE_CFG v4l2_fourcc('R', 'P', 'B', 'C') /* PiSP BE configuration */
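
The new V4L2_CAP_EDID device capability is reported through the regular VIDIOC_QUERYCAP ioctl. A small sketch, assuming a video node path supplied by the caller:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int is_edid_only_device(const char *path)
{
	struct v4l2_capability cap;
	int ret, fd = open(path, O_RDWR);

	if (fd < 0)
		return -1;
	memset(&cap, 0, sizeof(cap));
	ret = ioctl(fd, VIDIOC_QUERYCAP, &cap);
	close(fd);
	return ret < 0 ? -1 : !!(cap.device_caps & V4L2_CAP_EDID);
}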
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
index ddaa45e723c4..ee35a372805d 100644
--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -71,7 +71,13 @@ struct virtio_balloon_config {
#define VIRTIO_BALLOON_S_CACHES 7 /* Disk caches */
#define VIRTIO_BALLOON_S_HTLB_PGALLOC 8 /* Hugetlb page allocations */
#define VIRTIO_BALLOON_S_HTLB_PGFAIL 9 /* Hugetlb page allocation failures */
-#define VIRTIO_BALLOON_S_NR 10
+#define VIRTIO_BALLOON_S_OOM_KILL 10 /* OOM killer invocations */
+#define VIRTIO_BALLOON_S_ALLOC_STALL 11 /* Stall count of memory allocation */
+#define VIRTIO_BALLOON_S_ASYNC_SCAN 12 /* Amount of memory scanned asynchronously */
+#define VIRTIO_BALLOON_S_DIRECT_SCAN 13 /* Amount of memory scanned directly */
+#define VIRTIO_BALLOON_S_ASYNC_RECLAIM 14 /* Amount of memory reclaimed asynchronously */
+#define VIRTIO_BALLOON_S_DIRECT_RECLAIM 15 /* Amount of memory reclaimed directly */
+#define VIRTIO_BALLOON_S_NR 16
#define VIRTIO_BALLOON_S_NAMES_WITH_PREFIX(VIRTIO_BALLOON_S_NAMES_prefix) { \
VIRTIO_BALLOON_S_NAMES_prefix "swap-in", \
@@ -83,7 +89,13 @@ struct virtio_balloon_config {
VIRTIO_BALLOON_S_NAMES_prefix "available-memory", \
VIRTIO_BALLOON_S_NAMES_prefix "disk-caches", \
VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-allocations", \
- VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-failures" \
+ VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-failures", \
+ VIRTIO_BALLOON_S_NAMES_prefix "oom-kills", \
+ VIRTIO_BALLOON_S_NAMES_prefix "alloc-stalls", \
+ VIRTIO_BALLOON_S_NAMES_prefix "async-scans", \
+ VIRTIO_BALLOON_S_NAMES_prefix "direct-scans", \
+ VIRTIO_BALLOON_S_NAMES_prefix "async-reclaims", \
+ VIRTIO_BALLOON_S_NAMES_prefix "direct-reclaims" \
}
#define VIRTIO_BALLOON_S_NAMES VIRTIO_BALLOON_S_NAMES_WITH_PREFIX("")
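
Since VIRTIO_BALLOON_S_NAMES_WITH_PREFIX() expands to a brace-enclosed initializer, the extended name table can be indexed directly by the new stat tags. A small sketch:

#include <stdio.h>
#include <linux/virtio_balloon.h>

static const char *const stat_names[VIRTIO_BALLOON_S_NR] =
	VIRTIO_BALLOON_S_NAMES_WITH_PREFIX("balloon: ");

static void print_new_stats(void)
{
	/* The six tags added here run from OOM_KILL through DIRECT_RECLAIM. */
	for (int i = VIRTIO_BALLOON_S_OOM_KILL; i < VIRTIO_BALLOON_S_NR; i++)
		printf("%s\n", stat_names[i]);
}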
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
index 0e21f3998108..bf2c9cabd207 100644
--- a/include/uapi/linux/virtio_gpu.h
+++ b/include/uapi/linux/virtio_gpu.h
@@ -311,6 +311,7 @@ struct virtio_gpu_cmd_submit {
#define VIRTIO_GPU_CAPSET_VIRGL2 2
/* 3 is reserved for gfxstream */
#define VIRTIO_GPU_CAPSET_VENUS 4
+#define VIRTIO_GPU_CAPSET_DRM 6
/* VIRTIO_GPU_CMD_GET_CAPSET_INFO */
struct virtio_gpu_get_capset_info {
diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h
index e61104f35d73..faa9d62b3b30 100644
--- a/include/uapi/rdma/bnxt_re-abi.h
+++ b/include/uapi/rdma/bnxt_re-abi.h
@@ -66,6 +66,7 @@ enum bnxt_re_wqe_mode {
enum {
BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT = 0x01,
+ BNXT_RE_COMP_MASK_REQ_UCNTX_VAR_WQE_SUPPORT = 0x02,
};
struct bnxt_re_uctx_req {
@@ -118,10 +119,16 @@ struct bnxt_re_resize_cq_req {
__aligned_u64 cq_va;
};
+enum bnxt_re_qp_mask {
+ BNXT_RE_QP_REQ_MASK_VAR_WQE_SQ_SLOTS = 0x1,
+};
+
struct bnxt_re_qp_req {
__aligned_u64 qpsva;
__aligned_u64 qprva;
__aligned_u64 qp_handle;
+ __aligned_u64 comp_mask;
+ __u32 sq_slots;
};
struct bnxt_re_qp_resp {
@@ -134,8 +141,14 @@ struct bnxt_re_srq_req {
__aligned_u64 srq_handle;
};
+enum bnxt_re_srq_mask {
+ BNXT_RE_SRQ_TOGGLE_PAGE_SUPPORT = 0x1,
+};
+
struct bnxt_re_srq_resp {
__u32 srqid;
+ __u32 rsvd; /* padding */
+ __aligned_u64 comp_mask;
};
enum bnxt_re_shpg_offt {
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
index 5b74d6534899..fd2e4a3a56b3 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -274,6 +274,10 @@ enum mlx5_ib_create_cq_attrs {
MLX5_IB_ATTR_CREATE_CQ_UAR_INDEX = UVERBS_ID_DRIVER_NS_WITH_UHW,
};
+enum mlx5_ib_reg_dmabuf_mr_attrs {
+ MLX5_IB_ATTR_REG_DMABUF_MR_ACCESS_FLAGS = (1U << UVERBS_ID_NS_SHIFT),
+};
+
#define MLX5_IB_DW_MATCH_PARAM 0xA0
struct mlx5_ib_match_params {
@@ -344,6 +348,7 @@ enum mlx5_ib_pd_methods {
enum mlx5_ib_device_methods {
MLX5_IB_METHOD_QUERY_PORT = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_METHOD_GET_DATA_DIRECT_SYSFS_PATH,
};
enum mlx5_ib_query_port_attrs {
@@ -351,4 +356,8 @@ enum mlx5_ib_query_port_attrs {
MLX5_IB_ATTR_QUERY_PORT,
};
+enum mlx5_ib_get_data_direct_sysfs_path_attrs {
+ MLX5_IB_ATTR_GET_DATA_DIRECT_SYSFS_PATH = (1U << UVERBS_ID_NS_SHIFT),
+};
+
#endif
diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
index 3189c7f08d17..7c233df475e7 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_verbs.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
@@ -54,6 +54,10 @@ enum mlx5_ib_uapi_flow_action_packet_reformat_type {
MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x3,
};
+enum mlx5_ib_uapi_reg_dmabuf_flags {
+ MLX5_IB_UAPI_REG_DMABUF_ACCESS_DATA_DIRECT = 1 << 0,
+};
+
struct mlx5_ib_uapi_devx_async_cmd_hdr {
__aligned_u64 wr_id;
__u8 out_data[];
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index 2f37568f5556..39be09c0ffbb 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -15,6 +15,7 @@ enum {
enum {
RDMA_NL_GROUP_IWPM = 2,
RDMA_NL_GROUP_LS,
+ RDMA_NL_GROUP_NOTIFY,
RDMA_NL_NUM_GROUPS
};
@@ -305,6 +306,8 @@ enum rdma_nldev_command {
RDMA_NLDEV_CMD_DELDEV,
+ RDMA_NLDEV_CMD_MONITOR,
+
RDMA_NLDEV_NUM_OPS
};
@@ -574,6 +577,9 @@ enum rdma_nldev_attr {
RDMA_NLDEV_ATTR_NAME_ASSIGN_TYPE, /* u8 */
+ RDMA_NLDEV_ATTR_EVENT_TYPE, /* u8 */
+
+ RDMA_NLDEV_SYS_ATTR_MONITOR_MODE, /* u8 */
/*
* Always the end
*/
@@ -624,4 +630,14 @@ enum rdma_nl_name_assign_type {
RDMA_NAME_ASSIGN_TYPE_USER = 1, /* Provided by user-space */
};
+/*
+ * Supported rdma monitoring event types.
+ */
+enum rdma_nl_notify_event_type {
+ RDMA_REGISTER_EVENT,
+ RDMA_UNREGISTER_EVENT,
+ RDMA_NETDEV_ATTACH_EVENT,
+ RDMA_NETDEV_DETACH_EVENT,
+};
+
#endif /* _UAPI_RDMA_NETLINK_H */
diff --git a/include/uapi/sound/asequencer.h b/include/uapi/sound/asequencer.h
index 39b37edcf813..bc30c1f2a109 100644
--- a/include/uapi/sound/asequencer.h
+++ b/include/uapi/sound/asequencer.h
@@ -461,6 +461,8 @@ struct snd_seq_remove_events {
#define SNDRV_SEQ_PORT_FLG_TIMESTAMP (1<<1)
#define SNDRV_SEQ_PORT_FLG_TIME_REAL (1<<2)
+#define SNDRV_SEQ_PORT_FLG_IS_MIDI1 (1<<3) /* Keep MIDI 1.0 protocol */
+
/* port direction */
#define SNDRV_SEQ_PORT_DIR_UNKNOWN 0
#define SNDRV_SEQ_PORT_DIR_INPUT 1
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 8bf7e8a0eb6f..4cd513215bcd 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -869,7 +869,7 @@ struct snd_ump_block_info {
* Timer section - /dev/snd/timer
*/
-#define SNDRV_TIMER_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 7)
+#define SNDRV_TIMER_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 8)
enum {
SNDRV_TIMER_CLASS_NONE = -1,
@@ -894,6 +894,7 @@ enum {
#define SNDRV_TIMER_GLOBAL_RTC 1 /* unused */
#define SNDRV_TIMER_GLOBAL_HPET 2
#define SNDRV_TIMER_GLOBAL_HRTIMER 3
+#define SNDRV_TIMER_GLOBAL_UDRIVEN 4
/* info flags */
#define SNDRV_TIMER_FLG_SLAVE (1<<0) /* cannot be controlled */
@@ -974,6 +975,18 @@ struct snd_timer_status {
};
#endif
+/*
+ * This structure describes the userspace-driven timer. Such timers are purely virtual,
+ * and can only be triggered from software (for instance, by a userspace application).
+ */
+struct snd_timer_uinfo {
+ /* To pretend being a normal timer, we need to know the resolution in ns. */
+ __u64 resolution;
+ int fd;
+ unsigned int id;
+ unsigned char reserved[16];
+};
+
#define SNDRV_TIMER_IOCTL_PVERSION _IOR('T', 0x00, int)
#define SNDRV_TIMER_IOCTL_NEXT_DEVICE _IOWR('T', 0x01, struct snd_timer_id)
#define SNDRV_TIMER_IOCTL_TREAD_OLD _IOW('T', 0x02, int)
@@ -990,6 +1003,8 @@ struct snd_timer_status {
#define SNDRV_TIMER_IOCTL_CONTINUE _IO('T', 0xa2)
#define SNDRV_TIMER_IOCTL_PAUSE _IO('T', 0xa3)
#define SNDRV_TIMER_IOCTL_TREAD64 _IOW('T', 0xa4, int)
+#define SNDRV_TIMER_IOCTL_CREATE _IOWR('T', 0xa5, struct snd_timer_uinfo)
+#define SNDRV_TIMER_IOCTL_TRIGGER _IO('T', 0xa6)
#if __BITS_PER_LONG == 64
#define SNDRV_TIMER_IOCTL_TREAD SNDRV_TIMER_IOCTL_TREAD_OLD
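
A hedged userspace sketch of the new userspace-driven timer: SNDRV_TIMER_IOCTL_CREATE is issued on /dev/snd/timer, and the descriptor returned in snd_timer_uinfo.fd is assumed to be the one on which SNDRV_TIMER_IOCTL_TRIGGER delivers virtual ticks:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sound/asound.h>

static int make_utimer(void)
{
	struct snd_timer_uinfo info;
	int timer_fd = open("/dev/snd/timer", O_RDWR);

	if (timer_fd < 0)
		return -1;

	memset(&info, 0, sizeof(info));
	info.resolution = 1000000;	/* advertise a 1 ms resolution */
	if (ioctl(timer_fd, SNDRV_TIMER_IOCTL_CREATE, &info) < 0)
		return -1;

	/* One virtual tick for everyone listening on timer "info.id". */
	if (ioctl(info.fd, SNDRV_TIMER_IOCTL_TRIGGER) < 0)
		return -1;
	return info.fd;
}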
diff --git a/include/uapi/xen/privcmd.h b/include/uapi/xen/privcmd.h
index 8b8c5d1420fe..8e2c8fd44764 100644
--- a/include/uapi/xen/privcmd.h
+++ b/include/uapi/xen/privcmd.h
@@ -126,6 +126,11 @@ struct privcmd_ioeventfd {
__u8 pad[2];
};
+struct privcmd_pcidev_get_gsi {
+ __u32 sbdf;
+ __u32 gsi;
+};
+
/*
* @cmd: IOCTL_PRIVCMD_HYPERCALL
* @arg: &privcmd_hypercall_t
@@ -157,5 +162,7 @@ struct privcmd_ioeventfd {
_IOW('P', 8, struct privcmd_irqfd)
#define IOCTL_PRIVCMD_IOEVENTFD \
_IOW('P', 9, struct privcmd_ioeventfd)
+#define IOCTL_PRIVCMD_PCIDEV_GET_GSI \
+ _IOC(_IOC_NONE, 'P', 10, sizeof(struct privcmd_pcidev_get_gsi))
#endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
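
A hedged sketch of the new GSI query, assuming the usual (segment << 16 | bus << 8 | devfn) SBDF packing and a privcmd node already opened at /dev/xen/privcmd:

#include <sys/ioctl.h>
#include <linux/types.h>
#include <xen/privcmd.h>

static int query_gsi(int privcmd_fd, __u32 sbdf)
{
	struct privcmd_pcidev_get_gsi arg = { .sbdf = sbdf, .gsi = 0 };

	if (ioctl(privcmd_fd, IOCTL_PRIVCMD_PCIDEV_GET_GSI, &arg) < 0)
		return -1;
	return arg.gsi;
}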
diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h
index 853e95957c31..e594abe5d05f 100644
--- a/include/ufs/ufs.h
+++ b/include/ufs/ufs.h
@@ -597,7 +597,7 @@ struct ufs_dev_info {
};
/*
- * This enum is used in string mapping in include/trace/events/ufs.h.
+ * This enum is used in string mapping in ufs_trace.h.
*/
enum ufs_trace_str_t {
UFS_CMD_SEND, UFS_CMD_COMP, UFS_DEV_COMP,
@@ -607,7 +607,7 @@ enum ufs_trace_str_t {
/*
* Transaction Specific Fields (TSF) type in the UPIU package, this enum is
- * used in include/trace/events/ufs.h for UFS command trace.
+ * used in ufs_trace.h for UFS command trace.
*/
enum ufs_trace_tsf_t {
UFS_TSF_CDB, UFS_TSF_OSF, UFS_TSF_TM_INPUT, UFS_TSF_TM_OUTPUT
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 0fd2aebac728..3f68ae3e4330 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -17,6 +17,7 @@
#include <linux/blk-mq.h>
#include <linux/devfreq.h>
#include <linux/fault-inject.h>
+#include <linux/debugfs.h>
#include <linux/msi.h>
#include <linux/pm_runtime.h>
#include <linux/dma-direction.h>
diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h
index 9917c7743d80..27364c4a6ef9 100644
--- a/include/ufs/ufshci.h
+++ b/include/ufs/ufshci.h
@@ -25,8 +25,9 @@ enum {
REG_CONTROLLER_CAPABILITIES = 0x00,
REG_MCQCAP = 0x04,
REG_UFS_VERSION = 0x08,
- REG_CONTROLLER_DEV_ID = 0x10,
- REG_CONTROLLER_PROD_ID = 0x14,
+ REG_EXT_CONTROLLER_CAPABILITIES = 0x0C,
+ REG_CONTROLLER_PID = 0x10,
+ REG_CONTROLLER_MID = 0x14,
REG_AUTO_HIBERNATE_IDLE_TIMER = 0x18,
REG_INTERRUPT_STATUS = 0x20,
REG_INTERRUPT_ENABLE = 0x24,
diff --git a/include/vdso/getrandom.h b/include/vdso/getrandom.h
index a8b7c14b0ae0..6ca4d6de9e46 100644
--- a/include/vdso/getrandom.h
+++ b/include/vdso/getrandom.h
@@ -43,4 +43,32 @@ struct vgetrandom_state {
bool in_use;
};
+/**
+ * __arch_chacha20_blocks_nostack - Generate ChaCha20 stream without using the stack.
+ * @dst_bytes: Destination buffer to hold @nblocks * 64 bytes of output.
+ * @key: 32-byte input key.
+ * @counter: 8-byte counter, read on input and updated on return.
+ * @nblocks: Number of blocks to generate.
+ *
+ * Generates a given positive number of blocks of ChaCha20 output with nonce=0, and does not write
+ * to any stack or memory outside of the parameters passed to it, in order to mitigate stack data
+ * leaking into forked child processes.
+ */
+extern void __arch_chacha20_blocks_nostack(u8 *dst_bytes, const u32 *key, u32 *counter, size_t nblocks);
+
+/**
+ * __vdso_getrandom - Architecture-specific vDSO implementation of getrandom() syscall.
+ * @buffer: Passed to __cvdso_getrandom().
+ * @len: Passed to __cvdso_getrandom().
+ * @flags: Passed to __cvdso_getrandom().
+ * @opaque_state: Passed to __cvdso_getrandom().
+ * @opaque_len: Passed to __cvdso_getrandom().
+ *
+ * This function is implemented by making a single call to __cvdso_getrandom(), whose
+ * documentation may be consulted for more information.
+ *
+ * Returns: The return value of __cvdso_getrandom().
+ */
+extern ssize_t __vdso_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len);
+
#endif /* _VDSO_GETRANDOM_H */
diff --git a/include/vdso/helpers.h b/include/vdso/helpers.h
index 73501149439d..3ddb03bb05cb 100644
--- a/include/vdso/helpers.h
+++ b/include/vdso/helpers.h
@@ -4,6 +4,7 @@
#ifndef __ASSEMBLY__
+#include <asm/barrier.h>
#include <vdso/datapage.h>
static __always_inline u32 vdso_read_begin(const struct vdso_data *vd)
diff --git a/include/vdso/unaligned.h b/include/vdso/unaligned.h
new file mode 100644
index 000000000000..eee3d2a4dbe4
--- /dev/null
+++ b/include/vdso/unaligned.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_UNALIGNED_H
+#define __VDSO_UNALIGNED_H
+
+#define __get_unaligned_t(type, ptr) ({ \
+ const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
+ __pptr->x; \
+})
+
+#define __put_unaligned_t(type, val, ptr) do { \
+ struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
+ __pptr->x = (val); \
+} while (0)
+
+#endif /* __VDSO_UNALIGNED_H */
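
A short in-kernel sketch of the two helpers, assuming the usual compiler attribute definitions (__packed) are already in scope, as they are for the vDSO code this header serves:

#include <linux/types.h>
#include <vdso/unaligned.h>

/* Load a __u32 from a byte stream that may not be 4-byte aligned. */
static inline __u32 load_u32_unaligned(const void *p)
{
	return __get_unaligned_t(__u32, p);
}

/* Store it back the same way. */
static inline void store_u32_unaligned(void *p, __u32 v)
{
	__put_unaligned_t(__u32, v, p);
}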
diff --git a/include/video/vga.h b/include/video/vga.h
index 947c0abd04ef..468764d6727a 100644
--- a/include/video/vga.h
+++ b/include/video/vga.h
@@ -197,9 +197,26 @@ struct vgastate {
extern int save_vga(struct vgastate *state);
extern int restore_vga(struct vgastate *state);
+static inline unsigned char vga_mm_r (void __iomem *regbase, unsigned short port)
+{
+ return readb (regbase + port);
+}
+
+static inline void vga_mm_w (void __iomem *regbase, unsigned short port, unsigned char val)
+{
+ writeb (val, regbase + port);
+}
+
+static inline void vga_mm_w_fast (void __iomem *regbase, unsigned short port,
+ unsigned char reg, unsigned char val)
+{
+ writew (VGA_OUT16VAL (val, reg), regbase + port);
+}
+
/*
* generic VGA port read/write
*/
+#ifdef CONFIG_HAS_IOPORT
static inline unsigned char vga_io_r (unsigned short port)
{
@@ -217,22 +234,6 @@ static inline void vga_io_w_fast (unsigned short port, unsigned char reg,
outw(VGA_OUT16VAL (val, reg), port);
}
-static inline unsigned char vga_mm_r (void __iomem *regbase, unsigned short port)
-{
- return readb (regbase + port);
-}
-
-static inline void vga_mm_w (void __iomem *regbase, unsigned short port, unsigned char val)
-{
- writeb (val, regbase + port);
-}
-
-static inline void vga_mm_w_fast (void __iomem *regbase, unsigned short port,
- unsigned char reg, unsigned char val)
-{
- writew (VGA_OUT16VAL (val, reg), regbase + port);
-}
-
static inline unsigned char vga_r (void __iomem *regbase, unsigned short port)
{
if (regbase)
@@ -258,8 +259,25 @@ static inline void vga_w_fast (void __iomem *regbase, unsigned short port,
else
vga_io_w_fast (port, reg, val);
}
+#else /* CONFIG_HAS_IOPORT */
+static inline unsigned char vga_r (void __iomem *regbase, unsigned short port)
+{
+ return vga_mm_r (regbase, port);
+}
+
+static inline void vga_w (void __iomem *regbase, unsigned short port, unsigned char val)
+{
+ vga_mm_w (regbase, port, val);
+}
+static inline void vga_w_fast (void __iomem *regbase, unsigned short port,
+ unsigned char reg, unsigned char val)
+{
+ vga_mm_w_fast (regbase, port, reg, val);
+}
+#endif /* CONFIG_HAS_IOPORT */
+
/*
* VGA CRTC register read/write
*/
@@ -280,6 +298,7 @@ static inline void vga_wcrt (void __iomem *regbase, unsigned char reg, unsigned
#endif /* VGA_OUTW_WRITE */
}
+#ifdef CONFIG_HAS_IOPORT
static inline unsigned char vga_io_rcrt (unsigned char reg)
{
vga_io_w (VGA_CRT_IC, reg);
@@ -295,6 +314,7 @@ static inline void vga_io_wcrt (unsigned char reg, unsigned char val)
vga_io_w (VGA_CRT_DC, val);
#endif /* VGA_OUTW_WRITE */
}
+#endif /* CONFIG_HAS_IOPORT */
static inline unsigned char vga_mm_rcrt (void __iomem *regbase, unsigned char reg)
{
@@ -333,6 +353,7 @@ static inline void vga_wseq (void __iomem *regbase, unsigned char reg, unsigned
#endif /* VGA_OUTW_WRITE */
}
+#ifdef CONFIG_HAS_IOPORT
static inline unsigned char vga_io_rseq (unsigned char reg)
{
vga_io_w (VGA_SEQ_I, reg);
@@ -348,6 +369,7 @@ static inline void vga_io_wseq (unsigned char reg, unsigned char val)
vga_io_w (VGA_SEQ_D, val);
#endif /* VGA_OUTW_WRITE */
}
+#endif /* CONFIG_HAS_IOPORT */
static inline unsigned char vga_mm_rseq (void __iomem *regbase, unsigned char reg)
{
@@ -385,6 +407,7 @@ static inline void vga_wgfx (void __iomem *regbase, unsigned char reg, unsigned
#endif /* VGA_OUTW_WRITE */
}
+#ifdef CONFIG_HAS_IOPORT
static inline unsigned char vga_io_rgfx (unsigned char reg)
{
vga_io_w (VGA_GFX_I, reg);
@@ -400,6 +423,7 @@ static inline void vga_io_wgfx (unsigned char reg, unsigned char val)
vga_io_w (VGA_GFX_D, val);
#endif /* VGA_OUTW_WRITE */
}
+#endif /* CONFIG_HAS_IOPORT */
static inline unsigned char vga_mm_rgfx (void __iomem *regbase, unsigned char reg)
{
@@ -434,6 +458,7 @@ static inline void vga_wattr (void __iomem *regbase, unsigned char reg, unsigned
vga_w (regbase, VGA_ATT_W, val);
}
+#ifdef CONFIG_HAS_IOPORT
static inline unsigned char vga_io_rattr (unsigned char reg)
{
vga_io_w (VGA_ATT_IW, reg);
@@ -445,6 +470,7 @@ static inline void vga_io_wattr (unsigned char reg, unsigned char val)
vga_io_w (VGA_ATT_IW, reg);
vga_io_w (VGA_ATT_W, val);
}
+#endif /* CONFIG_HAS_IOPORT */
static inline unsigned char vga_mm_rattr (void __iomem *regbase, unsigned char reg)
{
diff --git a/include/xen/acpi.h b/include/xen/acpi.h
index b1e11863144d..daa96a22d257 100644
--- a/include/xen/acpi.h
+++ b/include/xen/acpi.h
@@ -67,10 +67,37 @@ static inline void xen_acpi_sleep_register(void)
acpi_suspend_lowlevel = xen_acpi_suspend_lowlevel;
}
}
+int xen_pvh_setup_gsi(int gsi, int trigger, int polarity);
+int xen_acpi_get_gsi_info(struct pci_dev *dev,
+ int *gsi_out,
+ int *trigger_out,
+ int *polarity_out);
#else
static inline void xen_acpi_sleep_register(void)
{
}
+
+static inline int xen_pvh_setup_gsi(int gsi, int trigger, int polarity)
+{
+ return -1;
+}
+
+static inline int xen_acpi_get_gsi_info(struct pci_dev *dev,
+ int *gsi_out,
+ int *trigger_out,
+ int *polarity_out)
+{
+ return -1;
+}
+#endif
+
+#ifdef CONFIG_XEN_PCI_STUB
+int pcistub_get_gsi_from_sbdf(unsigned int sbdf);
+#else
+static inline int pcistub_get_gsi_from_sbdf(unsigned int sbdf)
+{
+ return -1;
+}
#endif
#endif /* _XEN_ACPI_H */
diff --git a/include/xen/interface/elfnote.h b/include/xen/interface/elfnote.h
index 38deb1214613..918f47d87d7a 100644
--- a/include/xen/interface/elfnote.h
+++ b/include/xen/interface/elfnote.h
@@ -11,7 +11,9 @@
#define __XEN_PUBLIC_ELFNOTE_H__
/*
- * The notes should live in a SHT_NOTE segment and have "Xen" in the
+ * `incontents 200 elfnotes ELF notes
+ *
+ * The notes should live in a PT_NOTE segment and have "Xen" in the
* name field.
*
* Numeric types are either 4 or 8 bytes depending on the content of
@@ -22,6 +24,8 @@
*
* String values (for non-legacy) are NULL terminated ASCII, also known
* as ASCIZ type.
+ *
+ * Xen only uses ELF Notes contained in x86 binaries.
*/
/*
@@ -52,7 +56,7 @@
#define XEN_ELFNOTE_VIRT_BASE 3
/*
- * The offset of the ELF paddr field from the acutal required
+ * The offset of the ELF paddr field from the actual required
* pseudo-physical address (numeric).
*
* This is used to maintain backwards compatibility with older kernels
@@ -92,7 +96,12 @@
#define XEN_ELFNOTE_LOADER 8
/*
- * The kernel supports PAE (x86/32 only, string = "yes" or "no").
+ * The kernel supports PAE (x86/32 only, string = "yes", "no" or
+ * "bimodal").
+ *
+ * For compatibility with Xen 3.0.3 and earlier the "bimodal" setting
+ * may be given as "yes,bimodal" which will cause older Xen to treat
+ * this kernel as PAE.
*
* LEGACY: PAE (n.b. The legacy interface included a provision to
* indicate 'extended-cr3' support allowing L3 page tables to be
@@ -149,7 +158,9 @@
* The (non-default) location the initial phys-to-machine map should be
* placed at by the hypervisor (Dom0) or the tools (DomU).
* The kernel must be prepared for this mapping to be established using
- * large pages, despite such otherwise not being available to guests.
+ * large pages, despite such otherwise not being available to guests. Note
+ * that these large pages may be misaligned in PFN space (they'll obviously
+ * be aligned in MFN and virtual address spaces).
* The kernel must also be able to handle the page table pages used for
* this mapping not being accessible through the initial mapping.
* (Only x86-64 supports this at present.)
@@ -186,8 +197,80 @@
#define XEN_ELFNOTE_PHYS32_ENTRY 18
/*
+ * Physical loading constraints for PVH kernels
+ *
+ * The presence of this note indicates the kernel supports relocating itself.
+ *
+ * The note may include up to three 32bit values to place constraints on the
+ * guest physical loading addresses and alignment for a PVH kernel. Values
+ * are read in the following order:
+ * - a required start alignment (default 0x200000)
+ * - a minimum address for the start of the image (default 0; see below)
+ * - a maximum address for the last byte of the image (default 0xffffffff)
+ *
+ * When this note specifies an alignment value, it is used. Otherwise the
+ * maximum p_align value from loadable ELF Program Headers is used, if it is
+ * greater than or equal to 4k (0x1000). Otherwise, the default is used.
+ */
+#define XEN_ELFNOTE_PHYS32_RELOC 19
+
+/*
* The number of the highest elfnote defined.
*/
-#define XEN_ELFNOTE_MAX XEN_ELFNOTE_PHYS32_ENTRY
+#define XEN_ELFNOTE_MAX XEN_ELFNOTE_PHYS32_RELOC
+
+/*
+ * System information exported through crash notes.
+ *
+ * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_INFO
+ * note in case of a system crash. This note will contain various
+ * information about the system, see xen/include/xen/elfcore.h.
+ */
+#define XEN_ELFNOTE_CRASH_INFO 0x1000001
+
+/*
+ * System registers exported through crash notes.
+ *
+ * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_REGS
+ * note per cpu in case of a system crash. This note is architecture
+ * specific and will contain registers not saved in the "CORE" note.
+ * See xen/include/xen/elfcore.h for more information.
+ */
+#define XEN_ELFNOTE_CRASH_REGS 0x1000002
+
+
+/*
+ * xen dump-core none note.
+ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_NONE
+ * in its dump file to indicate that the file is a xen dump-core
+ * file. This note doesn't have any other information.
+ * See tools/libxc/xc_core.h for more information.
+ */
+#define XEN_ELFNOTE_DUMPCORE_NONE 0x2000000
+
+/*
+ * xen dump-core header note.
+ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_HEADER
+ * in its dump file.
+ * See tools/libxc/xc_core.h for more information.
+ */
+#define XEN_ELFNOTE_DUMPCORE_HEADER 0x2000001
+
+/*
+ * xen dump-core xen version note.
+ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_XEN_VERSION
+ * in its dump file. It contains the xen version obtained via the
+ * XENVER hypercall.
+ * See tools/libxc/xc_core.h for more information.
+ */
+#define XEN_ELFNOTE_DUMPCORE_XEN_VERSION 0x2000002
+
+/*
+ * xen dump-core format version note.
+ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION
+ * in its dump file. It contains a format version identifier.
+ * See tools/libxc/xc_core.h for more information.
+ */
+#define XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION 0x2000003
#endif /* __XEN_PUBLIC_ELFNOTE_H__ */
diff --git a/include/xen/interface/physdev.h b/include/xen/interface/physdev.h
index a237af867873..df74e65a884b 100644
--- a/include/xen/interface/physdev.h
+++ b/include/xen/interface/physdev.h
@@ -256,6 +256,13 @@ struct physdev_pci_device_add {
*/
#define PHYSDEVOP_prepare_msix 30
#define PHYSDEVOP_release_msix 31
+/*
+ * Notify the hypervisor that a PCI device has been reset, so that any
+ * internally cached state is regenerated. Should be called after any
+ * device reset performed by the hardware domain.
+ */
+#define PHYSDEVOP_pci_device_reset 32
+
struct physdev_pci_device {
/* IN */
uint16_t seg;
@@ -263,6 +270,16 @@ struct physdev_pci_device {
uint8_t devfn;
};
+struct pci_device_reset {
+ struct physdev_pci_device dev;
+#define PCI_DEVICE_RESET_COLD 0x0
+#define PCI_DEVICE_RESET_WARM 0x1
+#define PCI_DEVICE_RESET_HOT 0x2
+#define PCI_DEVICE_RESET_FLR 0x3
+#define PCI_DEVICE_RESET_MASK 0x3
+ uint32_t flags;
+};
+
#define PHYSDEVOP_DBGP_RESET_PREPARE 1
#define PHYSDEVOP_DBGP_RESET_DONE 2
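
A hedged Dom0-side sketch of notifying the hypervisor after an FLR; the include paths and the seg/bus/devfn layout of struct physdev_pci_device are assumed from this header:

#include <xen/interface/physdev.h>
#include <asm/xen/hypercall.h>

/* Illustrative: report that 0000:03:00.0 just completed a function-level reset. */
static int notify_flr_done(void)
{
	struct pci_device_reset reset = {
		.dev   = { .seg = 0, .bus = 0x03, .devfn = 0x00 },
		.flags = PCI_DEVICE_RESET_FLR,
	};

	return HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_reset, &reset);
}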
diff --git a/include/xen/pci.h b/include/xen/pci.h
index b8337cf85fd1..424b8ea89ca8 100644
--- a/include/xen/pci.h
+++ b/include/xen/pci.h
@@ -4,10 +4,16 @@
#define __XEN_PCI_H__
#if defined(CONFIG_XEN_DOM0)
+int xen_reset_device(const struct pci_dev *dev);
int xen_find_device_domain_owner(struct pci_dev *dev);
int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain);
int xen_unregister_device_domain_owner(struct pci_dev *dev);
#else
+static inline int xen_reset_device(const struct pci_dev *dev)
+{
+ return -1;
+}
+
static inline int xen_find_device_domain_owner(struct pci_dev *dev)
{
return -1;